blob: cfc599e01b7660cdc592fdcf6f6f5ce208daa2fb [file] [log] [blame]
Peter Stuge33fefca2009-01-26 01:33:02 +00001/*
2 * This file is part of the flashrom project.
3 *
4 * Copyright (C) 2009 Peter Stuge <peter@stuge.se>
Stefan Reinauer8fa64812009-08-12 09:27:45 +00005 * Copyright (C) 2009 coresystems GmbH
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +00006 * Copyright (C) 2010 Carl-Daniel Hailfinger
Rudolf Marek03ae5c12010-03-16 23:59:19 +00007 * Copyright (C) 2010 Rudolf Marek <r.marek@assembler.cz>
Peter Stuge33fefca2009-01-26 01:33:02 +00008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
Peter Stuge33fefca2009-01-26 01:33:02 +000017 */
Uwe Hermann7b2969b2009-04-15 10:52:49 +000018
Carl-Daniel Hailfinger831e8f42010-05-30 22:24:40 +000019#include <unistd.h>
Stefan Tauner7fb5aa02013-08-14 15:48:44 +000020#include <stdbool.h>
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010021#include <stdint.h>
Carl-Daniel Hailfinger831e8f42010-05-30 22:24:40 +000022#include <stdio.h>
Stefan Reinauer0593f212009-01-26 01:10:48 +000023#include <stdlib.h>
Stefan Reinauer8fa64812009-08-12 09:27:45 +000024#include <string.h>
Carl-Daniel Hailfinger11990da2013-07-13 23:21:05 +000025#include <errno.h>
Stefan Reinauer0593f212009-01-26 01:10:48 +000026#include "flash.h"
Thomas Heijligenc92f94b2022-03-17 13:41:17 +010027#include "platform.h"
Thomas Heijligen74b4aa02021-12-14 17:52:30 +010028#include "hwaccess_physmap.h"
Stefan Reinauer0593f212009-01-26 01:10:48 +000029
Patrick Georgia9095a92010-09-30 17:03:32 +000030#if !defined(__DJGPP__) && !defined(__LIBPAYLOAD__)
Carl-Daniel Hailfinger11990da2013-07-13 23:21:05 +000031/* No file access needed/possible to get mmap access permissions or access MSR. */
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010032#include <unistd.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000033#include <sys/stat.h>
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010034#include <sys/types.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000035#include <fcntl.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000036#endif
37
Rudolf Marek03ae5c12010-03-16 23:59:19 +000038#ifdef __DJGPP__
39#include <dpmi.h>
Rudolf Marek25fde402017-12-29 16:30:49 +010040#include <malloc.h>
Rudolf Marek837d8102010-04-25 22:47:50 +000041#include <sys/nearptr.h>
Rudolf Marek03ae5c12010-03-16 23:59:19 +000042
#define ONE_MEGABYTE (1024 * 1024)
#define MEM_DEV "dpmi"

/* Page-aligned virtual address of the mapping of the first megabyte of
 * physical memory (real-mode area). Set up lazily by map_first_meg() and
 * kept for the lifetime of the process. */
static void *realmem_map_aligned;
Rudolf Marek837d8102010-04-25 22:47:50 +000047
/* Map the first megabyte of physical memory (DJGPP/DPMI) and return the
 * virtual address corresponding to phys_addr, or ERROR_PTR on failure.
 * The 1 MB mapping is created once and cached in realmem_map_aligned;
 * subsequent calls just add the offset. len is unused because the whole
 * megabyte is always mapped. */
static void *map_first_meg(uintptr_t phys_addr, size_t len)
{
	void *realmem_map;
	size_t pagesize;

	/* Fast path: the megabyte is already mapped. */
	if (realmem_map_aligned)
		return realmem_map_aligned + phys_addr;

	/* valloc() from DJGPP 2.05 does not work properly */
	pagesize = getpagesize();

	/* Over-allocate by one page so we can align manually below. */
	realmem_map = malloc(ONE_MEGABYTE + pagesize);

	if (!realmem_map)
		return ERROR_PTR;

	/* Round the malloc'd pointer up to the next page boundary.
	 * NOTE(review): the unaligned pointer is intentionally never freed on
	 * success; the mapping lives until process exit. */
	realmem_map_aligned = (void *)(((size_t) realmem_map +
		(pagesize - 1)) & ~(pagesize - 1));

	if (__djgpp_map_physical_memory(realmem_map_aligned, ONE_MEGABYTE, 0)) {
		free(realmem_map);
		realmem_map_aligned = NULL;
		return ERROR_PTR;
	}

	return realmem_map_aligned + phys_addr;
}
Rudolf Marek03ae5c12010-03-16 23:59:19 +000075
/* DJGPP/DPMI physical mapping: returns a near pointer for the requested
 * physical range, or ERROR_PTR on failure. Ranges entirely below 1 MB are
 * handled via map_first_meg() because DPMI cannot map them directly. */
static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	int ret;
	__dpmi_meminfo mi;

	/* Enable 4GB limit on DS descriptor. */
	if (!__djgpp_nearptr_enable())
		return ERROR_PTR;

	if ((phys_addr + len - 1) < ONE_MEGABYTE) {
		/* We need to use another method to map first 1MB. */
		return map_first_meg(phys_addr, len);
	}

	mi.address = phys_addr;
	mi.size = len;
	ret = __dpmi_physical_address_mapping(&mi);

	if (ret != 0)
		return ERROR_PTR;

	/* Convert the linear address returned by DPMI into a near pointer. */
	return (void *) mi.address + __djgpp_conventional_base;
}
99
/* DPMI mappings are always RW; there is no separate cached/uncached path. */
#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

/* Release a DPMI physical mapping created by sys_physmap(). Addresses inside
 * the cached first-megabyte mapping are left alone. */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	__dpmi_meminfo mi;

	/* There is no known way to unmap the first 1 MB. The DPMI server will
	 * do this for us on exit.
	 */
	if ((virt_addr >= realmem_map_aligned) &&
	    ((virt_addr + len) <= (realmem_map_aligned + ONE_MEGABYTE))) {
		return;
	}

	mi.address = (unsigned long) virt_addr;
	__dpmi_free_physical_address_mapping(&mi);
}
118
Patrick Georgia9095a92010-09-30 17:03:32 +0000119#elif defined(__LIBPAYLOAD__)
120#include <arch/virtual.h>
121
#define MEM_DEV ""

/* libpayload translates physical to virtual addresses directly; no mapping
 * object is created, so this cannot fail here. */
static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	return (void *)phys_to_virt(phys_addr);
}

#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

/* Intentionally empty: sys_physmap() above created no mapping to release. */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
}
Carl-Daniel Hailfinger60d9bd22012-08-09 23:34:41 +0000135#elif defined(__MACH__) && defined(__APPLE__)
Thomas Heijligene0dff112022-03-18 20:47:13 +0100136#include <DirectHW/DirectHW.h>
Rudolf Marek03ae5c12010-03-16 23:59:19 +0000137
Stefan Reinauer83704c52011-03-18 22:00:15 +0000138#define MEM_DEV "DirectHW"
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000139
Stefan Tauner305e0b92013-07-17 23:46:44 +0000140static void *sys_physmap(uintptr_t phys_addr, size_t len)
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000141{
Patrick Georgied7a9642010-09-25 22:53:44 +0000142 /* The short form of ?: is a GNU extension.
143 * FIXME: map_physical returns NULL both for errors and for success
144 * if the region is mapped at virtual address zero. If in doubt, report
145 * an error until a better interface exists.
146 */
147 return map_physical(phys_addr, len) ? : ERROR_PTR;
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000148}
149
/* The OS X driver does not differentiate between mapping types. */
#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

/* Release a DirectHW mapping created by sys_physmap(). */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	unmap_physical(virt_addr, len);
}
158
159#else
160#include <sys/mman.h>
161
#if defined (__sun) && (defined(__i386) || defined(__amd64))
# define MEM_DEV "/dev/xsvc"
#else
# define MEM_DEV "/dev/mem"
#endif

/* Memory-device file descriptors; opened lazily on first use and kept open
 * for the lifetime of the process (one uncached, one cached). */
static int fd_mem = -1;
static int fd_mem_cached = -1;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000170
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000171/* For MMIO access. Must be uncached, doesn't make sense to restrict to ro. */
Stefan Tauner305e0b92013-07-17 23:46:44 +0000172static void *sys_physmap_rw_uncached(uintptr_t phys_addr, size_t len)
Stefan Reinauer0593f212009-01-26 01:10:48 +0000173{
174 void *virt_addr;
175
176 if (-1 == fd_mem) {
177 /* Open the memory device UNCACHED. Important for MMIO. */
Uwe Hermann7b2969b2009-04-15 10:52:49 +0000178 if (-1 == (fd_mem = open(MEM_DEV, O_RDWR | O_SYNC))) {
Stefan Tauner363fd7e2013-04-07 13:08:30 +0000179 msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000180 return ERROR_PTR;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000181 }
182 }
183
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000184 virt_addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd_mem, (off_t)phys_addr);
Patrick Georgied7a9642010-09-25 22:53:44 +0000185 return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000186}
187
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000188/* For reading DMI/coreboot/whatever tables. We should never write, and we
189 * do not care about caching.
190 */
Stefan Tauner305e0b92013-07-17 23:46:44 +0000191static void *sys_physmap_ro_cached(uintptr_t phys_addr, size_t len)
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000192{
193 void *virt_addr;
194
195 if (-1 == fd_mem_cached) {
196 /* Open the memory device CACHED. */
197 if (-1 == (fd_mem_cached = open(MEM_DEV, O_RDWR))) {
Stefan Tauner363fd7e2013-04-07 13:08:30 +0000198 msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000199 return ERROR_PTR;
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000200 }
201 }
202
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000203 virt_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd_mem_cached, (off_t)phys_addr);
Patrick Georgied7a9642010-09-25 22:53:44 +0000204 return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000205}
206
/* Release an mmap-backed mapping. virt_addr/len must be exactly what the
 * corresponding sys_physmap_* call returned/was given. */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	munmap(virt_addr, len);
}
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000211#endif
Stefan Reinauer0593f212009-01-26 01:10:48 +0000212
/* Named boolean argument values for physmap_common(), purely for
 * readability at the call sites. */
#define PHYSM_RW 0
#define PHYSM_RO 1
#define PHYSM_NOCLEANUP 0
#define PHYSM_CLEANUP 1
#define PHYSM_EXACT 0
#define PHYSM_ROUND 1
219
220/* Round start to nearest page boundary below and set len so that the resulting address range ends at the lowest
221 * possible page boundary where the original address range is still entirely contained. It returns the
222 * difference between the rounded start address and the original start address. */
223static uintptr_t round_to_page_boundaries(uintptr_t *start, size_t *len)
224{
225 uintptr_t page_size = getpagesize();
226 uintptr_t page_mask = ~(page_size-1);
227 uintptr_t end = *start + *len;
228 uintptr_t old_start = *start;
229 msg_gspew("page_size=%" PRIxPTR "\n", page_size);
230 msg_gspew("pre-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
231 PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, end);
232 *start = *start & page_mask;
233 end = (end + page_size - 1) & page_mask;
234 *len = end - *start;
235 msg_gspew("post-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
236 PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, *start + *len);
237 return old_start - *start;
238}
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000239
/* Bookkeeping for rphysmap(): a mapping to be torn down by the shutdown
 * handler undo_physmap(). */
struct undo_physmap_data {
	void *virt_addr;	/* page-aligned virtual address of the mapping */
	size_t len;		/* length of the mapping in bytes */
};
244
245static int undo_physmap(void *data)
246{
247 if (data == NULL) {
248 msg_perr("%s: tried to physunmap without valid data!\n", __func__);
249 return 1;
250 }
251 struct undo_physmap_data *d = data;
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000252 physunmap_unaligned(d->virt_addr, d->len);
Stefan Tauner7fb5aa02013-08-14 15:48:44 +0000253 free(data);
254 return 0;
255}
256
/* Map a physical address range and return the virtual address corresponding
 * to phys_addr, or ERROR_PTR on failure.
 *
 * descr       human-readable description used in error messages (NULL -> "memory")
 * phys_addr   physical start address
 * len         length in bytes; zero is rejected
 * readonly    PHYSM_RO for a cached read-only mapping, PHYSM_RW for uncached RW
 * autocleanup PHYSM_CLEANUP to register a shutdown handler that unmaps again
 * round       PHYSM_ROUND to round the range to page boundaries first
 */
static void *physmap_common(const char *descr, uintptr_t phys_addr, size_t len, bool readonly, bool autocleanup,
			    bool round)
{
	void *virt_addr;
	uintptr_t offset = 0;

	if (len == 0) {
		msg_pspew("Not mapping %s, zero size at 0x%0*" PRIxPTR ".\n", descr, PRIxPTR_WIDTH, phys_addr);
		return ERROR_PTR;
	}

	/* offset is how far phys_addr sits above the rounded-down page start;
	 * it is added back to the returned virtual address below. */
	if (round)
		offset = round_to_page_boundaries(&phys_addr, &len);

	if (readonly)
		virt_addr = sys_physmap_ro_cached(phys_addr, len);
	else
		virt_addr = sys_physmap_rw_uncached(phys_addr, len);

	if (ERROR_PTR == virt_addr) {
		if (NULL == descr)
			descr = "memory";
		msg_perr("Error accessing %s, 0x%zx bytes at 0x%0*" PRIxPTR "\n",
			 descr, len, PRIxPTR_WIDTH, phys_addr);
		msg_perr(MEM_DEV " mmap failed: %s\n", strerror(errno));
#ifdef __linux__
		if (EINVAL == errno) {
			msg_perr("In Linux this error can be caused by the CONFIG_NONPROMISC_DEVMEM (<2.6.27),\n");
			msg_perr("CONFIG_STRICT_DEVMEM (>=2.6.27) and CONFIG_X86_PAT kernel options.\n");
			msg_perr("Please check if either is enabled in your kernel before reporting a failure.\n");
			msg_perr("You can override CONFIG_X86_PAT at boot with the nopat kernel parameter but\n");
			msg_perr("disabling the other option unfortunately requires a kernel recompile. Sorry!\n");
		}
#elif defined (__OpenBSD__)
		msg_perr("Please set securelevel=-1 in /etc/rc.securelevel "
			 "and reboot, or reboot into\n"
			 "single user mode.\n");
#endif
		return ERROR_PTR;
	}

	if (autocleanup) {
		/* Freed either in undo_physmap() or on the error paths below. */
		struct undo_physmap_data *d = malloc(sizeof(*d));
		if (d == NULL) {
			msg_perr("%s: Out of memory!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}

		d->virt_addr = virt_addr;
		d->len = len;
		if (register_shutdown(undo_physmap, d) != 0) {
			msg_perr("%s: Could not register shutdown function!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}
	}

	return virt_addr + offset;
}
Stefan Reinauer8fa64812009-08-12 09:27:45 +0000317
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000318void physunmap_unaligned(void *virt_addr, size_t len)
319{
320 /* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
321 if (virt_addr == ERROR_PTR) {
322 msg_perr("Trying to unmap a nonexisting mapping!\n"
Nico Huberac90af62022-12-18 00:22:47 +0000323 "Please report a bug at flashrom-stable@flashrom.org\n");
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000324 return;
325 }
326
327 sys_physunmap_unaligned(virt_addr, len);
328}
329
/* Unmap a mapping obtained with physmap()/rphysmap()/physmap_ro(), i.e. one
 * whose range was rounded to page boundaries. The virtual range is rounded
 * the same way before being unmapped. */
void physunmap(void *virt_addr, size_t len)
{
	uintptr_t tmp;

	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexisting mapping!\n"
			 "Please report a bug at flashrom-stable@flashrom.org\n");
		return;
	}
	tmp = (uintptr_t)virt_addr;
	/* We assume that the virtual address of a page-aligned physical address is page-aligned as well. By
	 * extension, rounding a virtual unaligned address as returned by physmap should yield the same offset
	 * between rounded and original virtual address as between rounded and original physical address.
	 */
	round_to_page_boundaries(&tmp, &len);
	virt_addr = (void *)tmp;
	physunmap_unaligned(virt_addr, len);
}
349
/* Map a physical range read-write/uncached, rounded to page boundaries.
 * Unmap with physunmap(). */
void *physmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

/* Like physmap(), but automatically unmapped by a registered shutdown
 * handler ("r" = registered). */
void *rphysmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_CLEANUP, PHYSM_ROUND);
}

/* Map a physical range read-only/cached, rounded to page boundaries.
 * Unmap with physunmap(). */
void *physmap_ro(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

/* Map a physical range read-only/cached without page rounding.
 * Unmap with physunmap_unaligned(). */
void *physmap_ro_unaligned(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_EXACT);
}
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +0100369
/* Prevent reordering and/or merging of reads/writes to hardware.
 * Such reordering and/or merging would break device accesses which depend on the exact access order.
 */
static inline void sync_primitive(void)
{
/* This is not needed for...
 * - x86: uses uncached accesses which have a strongly ordered memory model.
 * - MIPS: uses uncached accesses in mode 2 on /dev/mem which has also a strongly ordered memory model.
 * - ARM: uses a strongly ordered memory model for device memories.
 *
 * See also https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/memory-barriers.txt
 */
// cf. http://lxr.free-electrons.com/source/arch/powerpc/include/asm/barrier.h
#if defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || defined(__POWERPC__) || \
      defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || \
      defined(_ARCH_PPC64) || defined(__ppc)
	/* "eieio" = Enforce In-order Execution of I/O; the "memory" clobber
	 * also stops the compiler from reordering across it. */
	__asm__("eieio" : : : "memory");
#elif (__sparc__) || defined (__sparc)
#if defined(__sparc_v9__) || defined(__sparcv9)
	/* Sparc V9 CPUs support three different memory orderings that range from x86-like TSO to PowerPC-like
	 * RMO. The modes can be switched at runtime thus to make sure we maintain the right order of access we
	 * use the strongest hardware memory barriers that exist on Sparc V9. */
	__asm__ volatile ("membar #Sync" ::: "memory");
#elif defined(__sparc_v8__) || defined(__sparcv8)
	/* On SPARC V8 there is no RMO just PSO and that does not apply to I/O accesses... but if V8 code is run
	 * on V9 CPUs it might apply... or not... we issue a write barrier anyway. That's the most suitable
	 * operation in the V8 instruction set anyway. If you know better then please tell us. */
	__asm__ volatile ("stbar");
#else
	#error Unknown and/or unsupported SPARC instruction set version detected.
#endif
#endif
}
403
/* Write one byte to MMIO space, followed by a hardware ordering barrier. */
void mmio_writeb(uint8_t val, void *addr)
{
	volatile uint8_t *mmio = addr;
	*mmio = val;
	sync_primitive();
}
409
/* Write one 16-bit word to MMIO space, followed by an ordering barrier. */
void mmio_writew(uint16_t val, void *addr)
{
	volatile uint16_t *mmio = addr;
	*mmio = val;
	sync_primitive();
}
415
/* Write one 32-bit word to MMIO space, followed by an ordering barrier. */
void mmio_writel(uint32_t val, void *addr)
{
	volatile uint32_t *mmio = addr;
	*mmio = val;
	sync_primitive();
}
421
/* Read one byte from MMIO space (volatile access, never elided). */
uint8_t mmio_readb(const void *addr)
{
	const volatile uint8_t *mmio = addr;
	return *mmio;
}
426
/* Read one 16-bit word from MMIO space (volatile access, never elided). */
uint16_t mmio_readw(const void *addr)
{
	const volatile uint16_t *mmio = addr;
	return *mmio;
}
431
/* Read one 32-bit word from MMIO space (volatile access, never elided). */
uint32_t mmio_readl(const void *addr)
{
	const volatile uint32_t *mmio = addr;
	return *mmio;
}
436
/* Copy len bytes from a mapped region at addr into buf.
 * NOTE(review): memcpy may use wider or merged accesses; callers appear to
 * use this for table reads rather than strict-access-order MMIO — confirm. */
void mmio_readn(const void *addr, uint8_t *buf, size_t len)
{
	/* Dropped the redundant trailing "return;" of the original. */
	memcpy(buf, addr, len);
}
442
/* Little-endian MMIO writes: convert from host byte order to LE before
 * writing (cpu_to_le* are no-ops on LE hosts). */
void mmio_le_writeb(uint8_t val, void *addr)
{
	mmio_writeb(cpu_to_le8(val), addr);
}

void mmio_le_writew(uint16_t val, void *addr)
{
	mmio_writew(cpu_to_le16(val), addr);
}

void mmio_le_writel(uint32_t val, void *addr)
{
	mmio_writel(cpu_to_le32(val), addr);
}
457
/* Little-endian MMIO reads: convert from LE to host byte order after
 * reading (le_to_cpu* are no-ops on LE hosts). */
uint8_t mmio_le_readb(const void *addr)
{
	return le_to_cpu8(mmio_readb(addr));
}

uint16_t mmio_le_readw(const void *addr)
{
	return le_to_cpu16(mmio_readw(addr));
}

uint32_t mmio_le_readl(const void *addr)
{
	return le_to_cpu32(mmio_readl(addr));
}
472
/* Access width of a saved MMIO write; selects which union member of
 * struct undo_mmio_write_data is valid. */
enum mmio_write_type {
	mmio_write_type_b,
	mmio_write_type_w,
	mmio_write_type_l,
};

/* Original value of an MMIO location, saved so it can be restored by the
 * shutdown handler undo_mmio_write(). */
struct undo_mmio_write_data {
	void *addr;			/* MMIO address to restore */
	int reg;			/* NOTE(review): apparently unused here — verify other users */
	enum mmio_write_type type;	/* which union member below is valid */
	union {
		uint8_t bdata;
		uint16_t wdata;
		uint32_t ldata;
	};
};
489
/* Shutdown callback: write the saved original value back to its MMIO
 * address with the saved access width, then free the descriptor.
 * Always returns 0. */
static int undo_mmio_write(void *p)
{
	struct undo_mmio_write_data *data = p;
	msg_pdbg("Restoring MMIO space at %p\n", data->addr);
	switch (data->type) {
	case mmio_write_type_b:
		mmio_writeb(data->bdata, data->addr);
		break;
	case mmio_write_type_w:
		mmio_writew(data->wdata, data->addr);
		break;
	case mmio_write_type_l:
		mmio_writel(data->ldata, data->addr);
		break;
	}
	/* p was allocated in register_undo_mmio_write. */
	free(p);
	return 0;
}
509
/* Save the current value at MMIO address a (with access width c = b/w/l)
 * and register undo_mmio_write() to restore it at shutdown. Exits the
 * program on allocation failure. The descriptor is freed by the handler. */
#define register_undo_mmio_write(a, c) \
{ \
	struct undo_mmio_write_data *undo_mmio_write_data; \
	undo_mmio_write_data = malloc(sizeof(*undo_mmio_write_data)); \
	if (!undo_mmio_write_data) { \
		msg_gerr("Out of memory!\n"); \
		exit(1); \
	} \
	undo_mmio_write_data->addr = a; \
	undo_mmio_write_data->type = mmio_write_type_##c; \
	undo_mmio_write_data->c##data = mmio_read##c(a); \
	register_shutdown(undo_mmio_write, undo_mmio_write_data); \
}

#define register_undo_mmio_writeb(a) register_undo_mmio_write(a, b)
#define register_undo_mmio_writew(a) register_undo_mmio_write(a, w)
#define register_undo_mmio_writel(a) register_undo_mmio_write(a, l)
527
/* "Restorable" MMIO writes: save the current value for restore-on-shutdown,
 * then perform the write. */
void rmmio_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_writeb(val, addr);
}

void rmmio_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_writew(val, addr);
}

void rmmio_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_writel(val, addr);
}
545
/* Restorable little-endian MMIO writes: save the current value for
 * restore-on-shutdown, then perform the LE write. */
void rmmio_le_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_le_writeb(val, addr);
}

void rmmio_le_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_le_writew(val, addr);
}

void rmmio_le_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_le_writel(val, addr);
}
563
/* Record the current value at an MMIO address for restore-on-shutdown
 * without writing anything now (for locations modified indirectly). */
void rmmio_valb(void *addr)
{
	register_undo_mmio_writeb(addr);
}

void rmmio_valw(void *addr)
{
	register_undo_mmio_writew(addr);
}

void rmmio_vall(void *addr)
{
	register_undo_mmio_writel(addr);
}