blob: 8ea05cf0ebbaa80d285acf977b4b6b9ad4788274 [file] [log] [blame]
Peter Stuge33fefca2009-01-26 01:33:02 +00001/*
2 * This file is part of the flashrom project.
3 *
4 * Copyright (C) 2009 Peter Stuge <peter@stuge.se>
Stefan Reinauer8fa64812009-08-12 09:27:45 +00005 * Copyright (C) 2009 coresystems GmbH
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +00006 * Copyright (C) 2010 Carl-Daniel Hailfinger
Rudolf Marek03ae5c12010-03-16 23:59:19 +00007 * Copyright (C) 2010 Rudolf Marek <r.marek@assembler.cz>
Peter Stuge33fefca2009-01-26 01:33:02 +00008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
Peter Stuge33fefca2009-01-26 01:33:02 +000017 */
Uwe Hermann7b2969b2009-04-15 10:52:49 +000018
Carl-Daniel Hailfinger831e8f42010-05-30 22:24:40 +000019#include <unistd.h>
Stefan Tauner7fb5aa02013-08-14 15:48:44 +000020#include <stdbool.h>
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010021#include <stdint.h>
Carl-Daniel Hailfinger831e8f42010-05-30 22:24:40 +000022#include <stdio.h>
Stefan Reinauer0593f212009-01-26 01:10:48 +000023#include <stdlib.h>
Stefan Reinauer8fa64812009-08-12 09:27:45 +000024#include <string.h>
Carl-Daniel Hailfinger11990da2013-07-13 23:21:05 +000025#include <errno.h>
Stefan Reinauer0593f212009-01-26 01:10:48 +000026#include "flash.h"
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010027#include "hwaccess.h"
Thomas Heijligen74b4aa02021-12-14 17:52:30 +010028#include "hwaccess_physmap.h"
Stefan Reinauer0593f212009-01-26 01:10:48 +000029
Patrick Georgia9095a92010-09-30 17:03:32 +000030#if !defined(__DJGPP__) && !defined(__LIBPAYLOAD__)
Carl-Daniel Hailfinger11990da2013-07-13 23:21:05 +000031/* No file access needed/possible to get mmap access permissions or access MSR. */
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010032#include <unistd.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000033#include <sys/stat.h>
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +010034#include <sys/types.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000035#include <fcntl.h>
Carl-Daniel Hailfingerdcef67e2010-06-21 23:20:15 +000036#endif
37
Rudolf Marek03ae5c12010-03-16 23:59:19 +000038#ifdef __DJGPP__
39#include <dpmi.h>
Rudolf Marek25fde402017-12-29 16:30:49 +010040#include <malloc.h>
Rudolf Marek837d8102010-04-25 22:47:50 +000041#include <sys/nearptr.h>
Rudolf Marek03ae5c12010-03-16 23:59:19 +000042
Rudolf Marek25fde402017-12-29 16:30:49 +010043#define ONE_MEGABYTE (1024 * 1024)
Rudolf Marek03ae5c12010-03-16 23:59:19 +000044#define MEM_DEV "dpmi"
45
Rudolf Marek25fde402017-12-29 16:30:49 +010046static void *realmem_map_aligned;
Rudolf Marek837d8102010-04-25 22:47:50 +000047
/* Map the first megabyte of physical memory (DJGPP/DPMI).
 *
 * DPMI cannot establish a physical mapping below 1 MB, so instead a
 * page-aligned 1 MB buffer is allocated once and __djgpp_map_physical_memory()
 * remaps its pages onto physical 0..1MB. The mapping is cached in
 * realmem_map_aligned and reused for all later calls; it is never freed
 * (the DPMI server tears it down on exit).
 *
 * @phys_addr  physical address below 1 MB
 * @len        unused here; the whole first megabyte is always mapped
 *             (callers guarantee phys_addr + len <= 1 MB)
 * Returns the virtual address for phys_addr, or ERROR_PTR on failure.
 */
static void *map_first_meg(uintptr_t phys_addr, size_t len)
{
        void *realmem_map;
        size_t pagesize;

        /* Already mapped once? Pointer arithmetic on void * is a GCC extension. */
        if (realmem_map_aligned)
                return realmem_map_aligned + phys_addr;

        /* valloc() from DJGPP 2.05 does not work properly */
        pagesize = getpagesize();

        /* Over-allocate by one page so we can align manually below. */
        realmem_map = malloc(ONE_MEGABYTE + pagesize);

        if (!realmem_map)
                return ERROR_PTR;

        /* Round up to the next page boundary inside the over-allocated buffer. */
        realmem_map_aligned = (void *)(((size_t) realmem_map +
                (pagesize - 1)) & ~(pagesize - 1));

        if (__djgpp_map_physical_memory(realmem_map_aligned, ONE_MEGABYTE, 0)) {
                /* Free the original (unaligned) allocation, not the aligned alias. */
                free(realmem_map);
                realmem_map_aligned = NULL;
                return ERROR_PTR;
        }

        return realmem_map_aligned + phys_addr;
}
Rudolf Marek03ae5c12010-03-16 23:59:19 +000075
/* Map a physical address range under DJGPP/DPMI.
 *
 * Ranges entirely below 1 MB are delegated to map_first_meg(); everything
 * else goes through the DPMI physical address mapping service.
 * Returns a near pointer (offset by __djgpp_conventional_base) or ERROR_PTR.
 */
static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
        int ret;
        __dpmi_meminfo mi;

        /* Enable 4GB limit on DS descriptor. */
        if (!__djgpp_nearptr_enable())
                return ERROR_PTR;

        if ((phys_addr + len - 1) < ONE_MEGABYTE) {
                /* We need to use another method to map first 1MB. */
                return map_first_meg(phys_addr, len);
        }

        mi.address = phys_addr;
        mi.size = len;
        ret = __dpmi_physical_address_mapping(&mi);

        if (ret != 0)
                return ERROR_PTR;

        /* mi.address now holds the linear address chosen by the DPMI host. */
        return (void *) mi.address + __djgpp_conventional_base;
}
99
100#define sys_physmap_rw_uncached sys_physmap
Rudolf Marek837d8102010-04-25 22:47:50 +0000101#define sys_physmap_ro_cached sys_physmap
Rudolf Marek03ae5c12010-03-16 23:59:19 +0000102
/* Undo a mapping created by sys_physmap() (DJGPP/DPMI variant).
 * Mappings inside the shared first-megabyte window are left alone.
 */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
        __dpmi_meminfo mi;

        /* There is no known way to unmap the first 1 MB. The DPMI server will
         * do this for us on exit.
         */
        if ((virt_addr >= realmem_map_aligned) &&
            ((virt_addr + len) <= (realmem_map_aligned + ONE_MEGABYTE))) {
                return;
        }

        /* Only mi.address is consulted by the DPMI free service. */
        mi.address = (unsigned long) virt_addr;
        __dpmi_free_physical_address_mapping(&mi);
}
118
Patrick Georgia9095a92010-09-30 17:03:32 +0000119#elif defined(__LIBPAYLOAD__)
120#include <arch/virtual.h>
121
122#define MEM_DEV ""
123
Stefan Tauner305e0b92013-07-17 23:46:44 +0000124void *sys_physmap(uintptr_t phys_addr, size_t len)
Patrick Georgia9095a92010-09-30 17:03:32 +0000125{
Uwe Hermann91f4afa2011-07-28 08:13:25 +0000126 return (void *)phys_to_virt(phys_addr);
Patrick Georgia9095a92010-09-30 17:03:32 +0000127}
128
129#define sys_physmap_rw_uncached sys_physmap
130#define sys_physmap_ro_cached sys_physmap
131
Jacob Garberbeeb8bc2019-06-21 15:24:17 -0600132static void sys_physunmap_unaligned(void *virt_addr, size_t len)
Patrick Georgia9095a92010-09-30 17:03:32 +0000133{
134}
Carl-Daniel Hailfinger60d9bd22012-08-09 23:34:41 +0000135#elif defined(__MACH__) && defined(__APPLE__)
Rudolf Marek03ae5c12010-03-16 23:59:19 +0000136
Stefan Reinauer83704c52011-03-18 22:00:15 +0000137#define MEM_DEV "DirectHW"
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000138
/* Map a physical address range via the DirectHW kernel extension on macOS. */
static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
        /* The short form of ?: is a GNU extension.
         * FIXME: map_physical returns NULL both for errors and for success
         * if the region is mapped at virtual address zero. If in doubt, report
         * an error until a better interface exists.
         */
        return map_physical(phys_addr, len) ? : ERROR_PTR;
}
148
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000149/* The OS X driver does not differentiate between mapping types. */
150#define sys_physmap_rw_uncached sys_physmap
151#define sys_physmap_ro_cached sys_physmap
152
/* Tear down a mapping previously created with DirectHW's map_physical(). */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
        unmap_physical(virt_addr, len);
}
157
158#else
159#include <sys/mman.h>
160
Stefan Reinauer0593f212009-01-26 01:10:48 +0000161#if defined (__sun) && (defined(__i386) || defined(__amd64))
162# define MEM_DEV "/dev/xsvc"
163#else
164# define MEM_DEV "/dev/mem"
165#endif
166
167static int fd_mem = -1;
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000168static int fd_mem_cached = -1;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000169
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000170/* For MMIO access. Must be uncached, doesn't make sense to restrict to ro. */
Stefan Tauner305e0b92013-07-17 23:46:44 +0000171static void *sys_physmap_rw_uncached(uintptr_t phys_addr, size_t len)
Stefan Reinauer0593f212009-01-26 01:10:48 +0000172{
173 void *virt_addr;
174
175 if (-1 == fd_mem) {
176 /* Open the memory device UNCACHED. Important for MMIO. */
Uwe Hermann7b2969b2009-04-15 10:52:49 +0000177 if (-1 == (fd_mem = open(MEM_DEV, O_RDWR | O_SYNC))) {
Stefan Tauner363fd7e2013-04-07 13:08:30 +0000178 msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000179 return ERROR_PTR;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000180 }
181 }
182
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000183 virt_addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd_mem, (off_t)phys_addr);
Patrick Georgied7a9642010-09-25 22:53:44 +0000184 return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000185}
186
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000187/* For reading DMI/coreboot/whatever tables. We should never write, and we
188 * do not care about caching.
189 */
Stefan Tauner305e0b92013-07-17 23:46:44 +0000190static void *sys_physmap_ro_cached(uintptr_t phys_addr, size_t len)
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000191{
192 void *virt_addr;
193
194 if (-1 == fd_mem_cached) {
195 /* Open the memory device CACHED. */
196 if (-1 == (fd_mem_cached = open(MEM_DEV, O_RDWR))) {
Stefan Tauner363fd7e2013-04-07 13:08:30 +0000197 msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000198 return ERROR_PTR;
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000199 }
200 }
201
Niklas Söderlund2d8b7ef2013-09-13 19:19:25 +0000202 virt_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd_mem_cached, (off_t)phys_addr);
Patrick Georgied7a9642010-09-25 22:53:44 +0000203 return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000204}
205
/* Release a /dev/mem mapping. munmap() failure is deliberately ignored. */
static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
        (void)munmap(virt_addr, len);
}
Stefan Reinauerf79edb92009-01-26 01:23:31 +0000210#endif
Stefan Reinauer0593f212009-01-26 01:10:48 +0000211
Stefan Tauner3285d7f2013-08-14 16:28:19 +0000212#define PHYSM_RW 0
213#define PHYSM_RO 1
214#define PHYSM_NOCLEANUP 0
215#define PHYSM_CLEANUP 1
216#define PHYSM_EXACT 0
217#define PHYSM_ROUND 1
218
219/* Round start to nearest page boundary below and set len so that the resulting address range ends at the lowest
220 * possible page boundary where the original address range is still entirely contained. It returns the
221 * difference between the rounded start address and the original start address. */
222static uintptr_t round_to_page_boundaries(uintptr_t *start, size_t *len)
223{
224 uintptr_t page_size = getpagesize();
225 uintptr_t page_mask = ~(page_size-1);
226 uintptr_t end = *start + *len;
227 uintptr_t old_start = *start;
228 msg_gspew("page_size=%" PRIxPTR "\n", page_size);
229 msg_gspew("pre-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
230 PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, end);
231 *start = *start & page_mask;
232 end = (end + page_size - 1) & page_mask;
233 *len = end - *start;
234 msg_gspew("post-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
235 PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, *start + *len);
236 return old_start - *start;
237}
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000238
/* Bookkeeping for mappings that are torn down automatically at shutdown. */
struct undo_physmap_data {
        void *virt_addr;        /* mapping start as returned by sys_physmap_* */
        size_t len;             /* mapping length in bytes */
};

/* Shutdown callback: unmap the recorded range and release the record itself.
 * Returns 0 on success, 1 if called without data. */
static int undo_physmap(void *data)
{
        struct undo_physmap_data *d = data;

        if (!d) {
                msg_perr("%s: tried to physunmap without valid data!\n", __func__);
                return 1;
        }
        physunmap_unaligned(d->virt_addr, d->len);
        free(d);
        return 0;
}
255
Niklas Söderlund5d307202013-09-14 09:02:27 +0000256static void *physmap_common(const char *descr, uintptr_t phys_addr, size_t len, bool readonly, bool autocleanup,
257 bool round)
Stefan Reinauer0593f212009-01-26 01:10:48 +0000258{
Stephan Guilloux6d08a3e2009-06-23 10:44:36 +0000259 void *virt_addr;
Stefan Tauner3285d7f2013-08-14 16:28:19 +0000260 uintptr_t offset = 0;
Stephan Guilloux6d08a3e2009-06-23 10:44:36 +0000261
Carl-Daniel Hailfinger1455b2b2009-05-11 14:13:25 +0000262 if (len == 0) {
Stefan Tauner305e0b92013-07-17 23:46:44 +0000263 msg_pspew("Not mapping %s, zero size at 0x%0*" PRIxPTR ".\n", descr, PRIxPTR_WIDTH, phys_addr);
Patrick Georgied7a9642010-09-25 22:53:44 +0000264 return ERROR_PTR;
Carl-Daniel Hailfinger1455b2b2009-05-11 14:13:25 +0000265 }
Uwe Hermann91f4afa2011-07-28 08:13:25 +0000266
Stefan Tauner3285d7f2013-08-14 16:28:19 +0000267 if (round)
268 offset = round_to_page_boundaries(&phys_addr, &len);
Carl-Daniel Hailfinger1455b2b2009-05-11 14:13:25 +0000269
Uwe Hermann91f4afa2011-07-28 08:13:25 +0000270 if (readonly)
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000271 virt_addr = sys_physmap_ro_cached(phys_addr, len);
Uwe Hermann91f4afa2011-07-28 08:13:25 +0000272 else
Carl-Daniel Hailfingerbaaffe02010-02-02 11:09:03 +0000273 virt_addr = sys_physmap_rw_uncached(phys_addr, len);
Stefan Reinauer0593f212009-01-26 01:10:48 +0000274
Patrick Georgied7a9642010-09-25 22:53:44 +0000275 if (ERROR_PTR == virt_addr) {
Stefan Reinauer0593f212009-01-26 01:10:48 +0000276 if (NULL == descr)
277 descr = "memory";
Stefan Tauner0554ca52013-07-25 22:54:25 +0000278 msg_perr("Error accessing %s, 0x%zx bytes at 0x%0*" PRIxPTR "\n",
279 descr, len, PRIxPTR_WIDTH, phys_addr);
Stefan Tauner363fd7e2013-04-07 13:08:30 +0000280 msg_perr(MEM_DEV " mmap failed: %s\n", strerror(errno));
Sean Nelson316a29f2010-05-07 20:09:04 +0000281#ifdef __linux__
Stefan Reinauer0593f212009-01-26 01:10:48 +0000282 if (EINVAL == errno) {
Sean Nelson316a29f2010-05-07 20:09:04 +0000283 msg_perr("In Linux this error can be caused by the CONFIG_NONPROMISC_DEVMEM (<2.6.27),\n");
284 msg_perr("CONFIG_STRICT_DEVMEM (>=2.6.27) and CONFIG_X86_PAT kernel options.\n");
285 msg_perr("Please check if either is enabled in your kernel before reporting a failure.\n");
286 msg_perr("You can override CONFIG_X86_PAT at boot with the nopat kernel parameter but\n");
287 msg_perr("disabling the other option unfortunately requires a kernel recompile. Sorry!\n");
Stefan Reinauer0593f212009-01-26 01:10:48 +0000288 }
Carl-Daniel Hailfingerb63b0672010-07-02 17:12:50 +0000289#elif defined (__OpenBSD__)
290 msg_perr("Please set securelevel=-1 in /etc/rc.securelevel "
Uwe Hermann91f4afa2011-07-28 08:13:25 +0000291 "and reboot, or reboot into\n"
292 "single user mode.\n");
Sean Nelson316a29f2010-05-07 20:09:04 +0000293#endif
Niklas Söderlund5d307202013-09-14 09:02:27 +0000294 return ERROR_PTR;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000295 }
296
Stefan Tauner7fb5aa02013-08-14 15:48:44 +0000297 if (autocleanup) {
Angel Pons690a9442021-06-07 12:33:53 +0200298 struct undo_physmap_data *d = malloc(sizeof(*d));
Stefan Tauner7fb5aa02013-08-14 15:48:44 +0000299 if (d == NULL) {
300 msg_perr("%s: Out of memory!\n", __func__);
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000301 physunmap_unaligned(virt_addr, len);
Niklas Söderlund5d307202013-09-14 09:02:27 +0000302 return ERROR_PTR;
Stefan Tauner7fb5aa02013-08-14 15:48:44 +0000303 }
304
305 d->virt_addr = virt_addr;
306 d->len = len;
307 if (register_shutdown(undo_physmap, d) != 0) {
308 msg_perr("%s: Could not register shutdown function!\n", __func__);
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000309 physunmap_unaligned(virt_addr, len);
Niklas Söderlund5d307202013-09-14 09:02:27 +0000310 return ERROR_PTR;
Stefan Tauner7fb5aa02013-08-14 15:48:44 +0000311 }
312 }
313
Stefan Tauner3285d7f2013-08-14 16:28:19 +0000314 return virt_addr + offset;
Stefan Reinauer0593f212009-01-26 01:10:48 +0000315}
Stefan Reinauer8fa64812009-08-12 09:27:45 +0000316
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000317void physunmap_unaligned(void *virt_addr, size_t len)
318{
319 /* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
320 if (virt_addr == ERROR_PTR) {
321 msg_perr("Trying to unmap a nonexisting mapping!\n"
Nico Huberac90af62022-12-18 00:22:47 +0000322 "Please report a bug at flashrom-stable@flashrom.org\n");
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000323 return;
324 }
325
326 sys_physunmap_unaligned(virt_addr, len);
327}
328
329void physunmap(void *virt_addr, size_t len)
330{
331 uintptr_t tmp;
332
333 /* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
334 if (virt_addr == ERROR_PTR) {
335 msg_perr("Trying to unmap a nonexisting mapping!\n"
Nico Huberac90af62022-12-18 00:22:47 +0000336 "Please report a bug at flashrom-stable@flashrom.org\n");
Carl-Daniel Hailfinger43eac032014-03-05 00:16:16 +0000337 return;
338 }
339 tmp = (uintptr_t)virt_addr;
340 /* We assume that the virtual address of a page-aligned physical address is page-aligned as well. By
341 * extension, rounding a virtual unaligned address as returned by physmap should yield the same offset
342 * between rounded and original virtual address as between rounded and original physical address.
343 */
344 round_to_page_boundaries(&tmp, &len);
345 virt_addr = (void *)tmp;
346 physunmap_unaligned(virt_addr, len);
347}
348
/* Map an MMIO range read-write, uncached, page-rounded. Unmap with physunmap(). */
void *physmap(const char *descr, uintptr_t phys_addr, size_t len)
{
        return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

/* Like physmap(), but the mapping is released automatically at shutdown ("r" = registered). */
void *rphysmap(const char *descr, uintptr_t phys_addr, size_t len)
{
        return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_CLEANUP, PHYSM_ROUND);
}

/* Map a range read-only and cached (for tables), page-rounded. Unmap with physunmap(). */
void *physmap_ro(const char *descr, uintptr_t phys_addr, size_t len)
{
        return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

/* Like physmap_ro(), but without page rounding. Unmap with physunmap_unaligned(). */
void *physmap_ro_unaligned(const char *descr, uintptr_t phys_addr, size_t len)
{
        return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_EXACT);
}
Thomas Heijligen3f4d35d2022-01-17 15:11:43 +0100368
/* Prevent reordering and/or merging of reads/writes to hardware.
 * Such reordering and/or merging would break device accesses which depend on the exact access order.
 */
static inline void sync_primitive(void)
{
/* This is not needed for...
 * - x86: uses uncached accesses which have a strongly ordered memory model.
 * - MIPS: uses uncached accesses in mode 2 on /dev/mem which has also a strongly ordered memory model.
 * - ARM: uses a strongly ordered memory model for device memories.
 *
 * See also https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/memory-barriers.txt
 */
// cf. http://lxr.free-electrons.com/source/arch/powerpc/include/asm/barrier.h
#if defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || defined(__POWERPC__) || \
    defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || \
    defined(_ARCH_PPC64) || defined(__ppc)
        asm("eieio" : : : "memory");
/* Use defined() consistently; the old "(__sparc__)" form relied on the
 * undefined identifier silently evaluating to 0 in #elif. */
#elif defined(__sparc__) || defined(__sparc)
#if defined(__sparc_v9__) || defined(__sparcv9)
        /* Sparc V9 CPUs support three different memory orderings that range from x86-like TSO to PowerPC-like
         * RMO. The modes can be switched at runtime thus to make sure we maintain the right order of access we
         * use the strongest hardware memory barriers that exist on Sparc V9. */
        asm volatile ("membar #Sync" ::: "memory");
#elif defined(__sparc_v8__) || defined(__sparcv8)
        /* On SPARC V8 there is no RMO just PSO and that does not apply to I/O accesses... but if V8 code is run
         * on V9 CPUs it might apply... or not... we issue a write barrier anyway. That's the most suitable
         * operation in the V8 instruction set anyway. If you know better then please tell us. */
        asm volatile ("stbar");
#else
        #error Unknown and/or unsupported SPARC instruction set version detected.
#endif
#endif
}
402
/* 8-bit MMIO write followed by an ordering barrier. */
void mmio_writeb(uint8_t val, void *addr)
{
        volatile uint8_t *mmio = addr;
        *mmio = val;
        sync_primitive();
}

/* 16-bit MMIO write followed by an ordering barrier. */
void mmio_writew(uint16_t val, void *addr)
{
        volatile uint16_t *mmio = addr;
        *mmio = val;
        sync_primitive();
}

/* 32-bit MMIO write followed by an ordering barrier. */
void mmio_writel(uint32_t val, void *addr)
{
        volatile uint32_t *mmio = addr;
        *mmio = val;
        sync_primitive();
}
420
/* 8-bit MMIO read through a volatile access. */
uint8_t mmio_readb(const void *addr)
{
        const volatile uint8_t *mmio = addr;
        return *mmio;
}

/* 16-bit MMIO read through a volatile access. */
uint16_t mmio_readw(const void *addr)
{
        const volatile uint16_t *mmio = addr;
        return *mmio;
}

/* 32-bit MMIO read through a volatile access. */
uint32_t mmio_readl(const void *addr)
{
        const volatile uint32_t *mmio = addr;
        return *mmio;
}
435
/* Copy len bytes from an MMIO region into buf.
 * NOTE(review): memcpy() gives no guarantee about the access widths used on
 * the MMIO side; callers that need fixed-width accesses must use
 * mmio_readb/w/l instead. (Removed a redundant bare "return;".)
 */
void mmio_readn(const void *addr, uint8_t *buf, size_t len)
{
        memcpy(buf, addr, len);
}
441
/* Little-endian MMIO accessors: convert between host byte order and
 * little-endian on top of the plain mmio_* accessors. The cpu_to_le* and
 * le_to_cpu* conversions are provided by the project headers. */

void mmio_le_writeb(uint8_t val, void *addr)
{
        mmio_writeb(cpu_to_le8(val), addr);
}

void mmio_le_writew(uint16_t val, void *addr)
{
        mmio_writew(cpu_to_le16(val), addr);
}

void mmio_le_writel(uint32_t val, void *addr)
{
        mmio_writel(cpu_to_le32(val), addr);
}

uint8_t mmio_le_readb(const void *addr)
{
        return le_to_cpu8(mmio_readb(addr));
}

uint16_t mmio_le_readw(const void *addr)
{
        return le_to_cpu16(mmio_readw(addr));
}

uint32_t mmio_le_readl(const void *addr)
{
        return le_to_cpu32(mmio_readl(addr));
}
471
/* Access width of a recorded MMIO write. */
enum mmio_write_type {
        mmio_write_type_b,
        mmio_write_type_w,
        mmio_write_type_l,
};

/* Saved original register content, restored at shutdown.
 * Layout is relied upon by the register_undo_mmio_write() macro below. */
struct undo_mmio_write_data {
        void *addr;
        int reg;
        enum mmio_write_type type;
        union {
                uint8_t bdata;
                uint16_t wdata;
                uint32_t ldata;
        };
};

/* Shutdown callback: write the saved value back with the recorded width,
 * then release the record. Always returns 0. */
static int undo_mmio_write(void *p)
{
        struct undo_mmio_write_data *restore = p;

        msg_pdbg("Restoring MMIO space at %p\n", restore->addr);
        switch (restore->type) {
        case mmio_write_type_b:
                mmio_writeb(restore->bdata, restore->addr);
                break;
        case mmio_write_type_w:
                mmio_writew(restore->wdata, restore->addr);
                break;
        case mmio_write_type_l:
                mmio_writel(restore->ldata, restore->addr);
                break;
        }
        /* p was allocated in register_undo_mmio_write. */
        free(p);
        return 0;
}
508
/* Read the current value at address a with width suffix c (b/w/l), store it in
 * a heap-allocated undo record and register undo_mmio_write() as a shutdown
 * function so the original value is restored at exit. Exits on OOM.
 * Wrapped in do { } while (0) so the macro behaves as a single statement even
 * in unbraced if/else bodies; the previous bare-block form left a stray
 * semicolon after expansion and was a dangling-else hazard. */
#define register_undo_mmio_write(a, c)                                  \
do {                                                                    \
        struct undo_mmio_write_data *undo_mmio_write_data;              \
        undo_mmio_write_data = malloc(sizeof(*undo_mmio_write_data));   \
        if (!undo_mmio_write_data) {                                    \
                msg_gerr("Out of memory!\n");                           \
                exit(1);                                                \
        }                                                               \
        undo_mmio_write_data->addr = a;                                 \
        undo_mmio_write_data->type = mmio_write_type_##c;               \
        undo_mmio_write_data->c##data = mmio_read##c(a);                \
        register_shutdown(undo_mmio_write, undo_mmio_write_data);       \
} while (0)

#define register_undo_mmio_writeb(a) register_undo_mmio_write(a, b)
#define register_undo_mmio_writew(a) register_undo_mmio_write(a, w)
#define register_undo_mmio_writel(a) register_undo_mmio_write(a, l)
526
/* "r" (restore) variants: save the current register value for automatic
 * restoration at shutdown, then perform the write. */

void rmmio_writeb(uint8_t val, void *addr)
{
        register_undo_mmio_writeb(addr);
        mmio_writeb(val, addr);
}

void rmmio_writew(uint16_t val, void *addr)
{
        register_undo_mmio_writew(addr);
        mmio_writew(val, addr);
}

void rmmio_writel(uint32_t val, void *addr)
{
        register_undo_mmio_writel(addr);
        mmio_writel(val, addr);
}

void rmmio_le_writeb(uint8_t val, void *addr)
{
        register_undo_mmio_writeb(addr);
        mmio_le_writeb(val, addr);
}

void rmmio_le_writew(uint16_t val, void *addr)
{
        register_undo_mmio_writew(addr);
        mmio_le_writew(val, addr);
}

void rmmio_le_writel(uint32_t val, void *addr)
{
        register_undo_mmio_writel(addr);
        mmio_le_writel(val, addr);
}

/* Record the current value at addr for restore-on-shutdown without writing
 * anything now — presumably for registers modified indirectly elsewhere
 * ("val" = save value; confirm against callers). */
void rmmio_valb(void *addr)
{
        register_undo_mmio_writeb(addr);
}

void rmmio_valw(void *addr)
{
        register_undo_mmio_writew(addr);
}

void rmmio_vall(void *addr)
{
        register_undo_mmio_writel(addr);
}