/*
 * This file is part of the flashrom project.
 *
 * Copyright (C) 2009 Peter Stuge <peter@stuge.se>
 * Copyright (C) 2009 coresystems GmbH
 * Copyright (C) 2010 Carl-Daniel Hailfinger
 * Copyright (C) 2010 Rudolf Marek <r.marek@assembler.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "flash.h"
#include "platform.h"
#include "hwaccess_physmap.h"

#if !defined(__DJGPP__) && !defined(__LIBPAYLOAD__)
/* No file access is needed (or possible) on DJGPP or libpayload to get mmap access permissions or to access MSRs. */
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#endif

#ifdef __DJGPP__
#include <dpmi.h>
#include <malloc.h>
#include <sys/nearptr.h>

#define ONE_MEGABYTE (1024 * 1024)
#define MEM_DEV "dpmi"

static void *realmem_map_aligned;

static void *map_first_meg(uintptr_t phys_addr, size_t len)
{
	void *realmem_map;
	size_t pagesize;

	if (realmem_map_aligned)
		return realmem_map_aligned + phys_addr;

	/* valloc() from DJGPP 2.05 does not work properly */
	pagesize = getpagesize();

	realmem_map = malloc(ONE_MEGABYTE + pagesize);

	if (!realmem_map)
		return ERROR_PTR;

	realmem_map_aligned = (void *)(((size_t) realmem_map +
		(pagesize - 1)) & ~(pagesize - 1));

	if (__djgpp_map_physical_memory(realmem_map_aligned, ONE_MEGABYTE, 0)) {
		free(realmem_map);
		realmem_map_aligned = NULL;
		return ERROR_PTR;
	}

	return realmem_map_aligned + phys_addr;
}
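
/*
 * The alignment math above, as a worked example (a sketch, assuming a 4 KiB
 * page size): if malloc() returns 0x00123456, then
 *	(0x00123456 + 0xfff) & ~0xfff == 0x00124000 == realmem_map_aligned,
 * so the first megabyte is mapped at a page boundary and phys_addr is simply
 * an offset into it.
 */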

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	int ret;
	__dpmi_meminfo mi;

	/* Enable 4GB limit on DS descriptor. */
	if (!__djgpp_nearptr_enable())
		return ERROR_PTR;

	if ((phys_addr + len - 1) < ONE_MEGABYTE) {
		/* We need to use another method to map the first 1 MB. */
		return map_first_meg(phys_addr, len);
	}

	mi.address = phys_addr;
	mi.size = len;
	ret = __dpmi_physical_address_mapping(&mi);

	if (ret != 0)
		return ERROR_PTR;

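	/* On success, __dpmi_physical_address_mapping() stores the linear
	 * address of the new mapping in mi.address; adding
	 * __djgpp_conventional_base converts it to a pointer usable through
	 * the near-pointer-enabled DS descriptor. */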
	return (void *) mi.address + __djgpp_conventional_base;
}

#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	__dpmi_meminfo mi;

	/* There is no known way to unmap the first 1 MB. The DPMI server will
	 * do this for us on exit.
	 */
	if ((virt_addr >= realmem_map_aligned) &&
	    ((virt_addr + len) <= (realmem_map_aligned + ONE_MEGABYTE))) {
		return;
	}

	mi.address = (unsigned long) virt_addr;
	__dpmi_free_physical_address_mapping(&mi);
}

#elif defined(__LIBPAYLOAD__)
#include <arch/virtual.h>

#define MEM_DEV ""

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	return (void *)phys_to_virt(phys_addr);
}

#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
}
#elif defined(__MACH__) && defined(__APPLE__)
#include <DirectHW/DirectHW.h>

#define MEM_DEV "DirectHW"

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	/* The short form of ?: is a GNU extension.
	 * FIXME: map_physical returns NULL both for errors and for success
	 * if the region is mapped at virtual address zero. If in doubt, report
	 * an error until a better interface exists.
	 */
	return map_physical(phys_addr, len) ? : ERROR_PTR;
}

/* The OS X driver does not differentiate between mapping types. */
#define sys_physmap_rw_uncached sys_physmap
#define sys_physmap_ro_cached sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	unmap_physical(virt_addr, len);
}

#else
#include <sys/mman.h>

#if defined(__sun) && (defined(__i386) || defined(__amd64))
# define MEM_DEV "/dev/xsvc"
#else
# define MEM_DEV "/dev/mem"
#endif

static int fd_mem = -1;
static int fd_mem_cached = -1;

/* For MMIO access. Must be uncached, doesn't make sense to restrict to ro. */
static void *sys_physmap_rw_uncached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem) {
		/* Open the memory device UNCACHED. Important for MMIO. */
		if (-1 == (fd_mem = open(MEM_DEV, O_RDWR | O_SYNC))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd_mem, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

/* For reading DMI/coreboot/whatever tables. We should never write, and we
 * do not care about caching.
 */
static void *sys_physmap_ro_cached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem_cached) {
		/* Open the memory device CACHED. */
		if (-1 == (fd_mem_cached = open(MEM_DEV, O_RDWR))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd_mem_cached, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	munmap(virt_addr, len);
}
#endif

#define PHYSM_RW 0
#define PHYSM_RO 1
#define PHYSM_NOCLEANUP 0
#define PHYSM_CLEANUP 1
#define PHYSM_EXACT 0
#define PHYSM_ROUND 1

/* Round start down to the nearest page boundary, and grow len so that the resulting address range ends at
 * the lowest page boundary that still contains the original range entirely. Returns the difference between
 * the rounded and the original start address. */
static uintptr_t round_to_page_boundaries(uintptr_t *start, size_t *len)
{
	uintptr_t page_size = getpagesize();
	uintptr_t page_mask = ~(page_size - 1);
	uintptr_t end = *start + *len;
	uintptr_t old_start = *start;
	msg_gspew("page_size=%" PRIxPTR "\n", page_size);
	msg_gspew("pre-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, end);
	*start = *start & page_mask;
	end = (end + page_size - 1) & page_mask;
	*len = end - *start;
	msg_gspew("post-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, *start + *len);
	return old_start - *start;
}
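
/*
 * A worked example (a sketch, assuming 4 KiB pages): start=0x1234, len=0x10
 * gives end=0x1244; rounding yields start=0x1000, end=0x2000, len=0x1000,
 * and a return value of 0x234, i.e. the offset of the original start within
 * the rounded mapping.
 */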

struct undo_physmap_data {
	void *virt_addr;
	size_t len;
};

static int undo_physmap(void *data)
{
	if (data == NULL) {
		msg_perr("%s: tried to physunmap without valid data!\n", __func__);
		return 1;
	}
	struct undo_physmap_data *d = data;
	physunmap_unaligned(d->virt_addr, d->len);
	free(data);
	return 0;
}

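/* physmap_common() registers undo_physmap() above as a shutdown callback
 * when the caller asks for automatic cleanup (see rphysmap() below). */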
static void *physmap_common(const char *descr, uintptr_t phys_addr, size_t len, bool readonly, bool autocleanup,
			    bool round)
{
	void *virt_addr;
	uintptr_t offset = 0;

	if (len == 0) {
		msg_pspew("Not mapping %s, zero size at 0x%0*" PRIxPTR ".\n", descr, PRIxPTR_WIDTH, phys_addr);
		return ERROR_PTR;
	}

	if (round)
		offset = round_to_page_boundaries(&phys_addr, &len);

	if (readonly)
		virt_addr = sys_physmap_ro_cached(phys_addr, len);
	else
		virt_addr = sys_physmap_rw_uncached(phys_addr, len);

	if (ERROR_PTR == virt_addr) {
		if (NULL == descr)
			descr = "memory";
		msg_perr("Error accessing %s, 0x%zx bytes at 0x%0*" PRIxPTR "\n",
			 descr, len, PRIxPTR_WIDTH, phys_addr);
		msg_perr(MEM_DEV " mmap failed: %s\n", strerror(errno));
#ifdef __linux__
		if (EINVAL == errno) {
			msg_perr("On Linux this error can be caused by the CONFIG_NONPROMISC_DEVMEM (<2.6.27),\n");
			msg_perr("CONFIG_STRICT_DEVMEM (>=2.6.27) and CONFIG_X86_PAT kernel options.\n");
			msg_perr("Please check if either is enabled in your kernel before reporting a failure.\n");
			msg_perr("You can override CONFIG_X86_PAT at boot with the nopat kernel parameter but\n");
			msg_perr("disabling the other option unfortunately requires a kernel recompile. Sorry!\n");
		}
#elif defined(__OpenBSD__)
		msg_perr("Please set securelevel=-1 in /etc/rc.securelevel "
			 "and reboot, or reboot into\n"
			 "single user mode.\n");
#endif
		return ERROR_PTR;
	}

	if (autocleanup) {
		struct undo_physmap_data *d = malloc(sizeof(*d));
		if (d == NULL) {
			msg_perr("%s: Out of memory!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}

		d->virt_addr = virt_addr;
		d->len = len;
		if (register_shutdown(undo_physmap, d) != 0) {
			msg_perr("%s: Could not register shutdown function!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}
	}

	return virt_addr + offset;
}

void physunmap_unaligned(void *virt_addr, size_t len)
{
	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexistent mapping!\n"
			 "Please report a bug at flashprog@flashprog.org\n");
		return;
	}

	sys_physunmap_unaligned(virt_addr, len);
}

void physunmap(void *virt_addr, size_t len)
{
	uintptr_t tmp;

	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexistent mapping!\n"
			 "Please report a bug at flashprog@flashprog.org\n");
		return;
	}
	tmp = (uintptr_t)virt_addr;
	/* We assume that the virtual address of a page-aligned physical address is page-aligned as well. By
	 * extension, rounding a virtual unaligned address as returned by physmap should yield the same offset
	 * between rounded and original virtual address as between rounded and original physical address.
	 */
	round_to_page_boundaries(&tmp, &len);
	virt_addr = (void *)tmp;
	physunmap_unaligned(virt_addr, len);
}
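
/* physunmap() pairs with the page-rounding mapping calls below (physmap(),
 * rphysmap(), physmap_ro()); physunmap_unaligned() pairs with
 * physmap_ro_unaligned(), which maps the exact requested range. */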

void *physmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *rphysmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_CLEANUP, PHYSM_ROUND);
}

void *physmap_ro(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *physmap_ro_unaligned(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_EXACT);
}
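
/*
 * Typical usage, a minimal sketch (the BAR address and register offset are
 * hypothetical, not taken from real chipset documentation):
 *
 *	uint8_t *bar = physmap("SPI MMIO", 0xfed1f800, 0x200);
 *	if (bar == ERROR_PTR)
 *		return 1;
 *	uint32_t status = mmio_readl(bar + 0x04);
 *	physunmap(bar, 0x200);
 *
 * The mapping is transparently rounded to page boundaries; the returned
 * pointer still refers to the requested physical address.
 */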

/* Prevent reordering and/or merging of reads/writes to hardware.
 * Such reordering and/or merging would break device accesses which depend on the exact access order.
 */
static inline void sync_primitive(void)
{
	/* This is not needed for...
	 * - x86: uses uncached accesses which have a strongly ordered memory model.
	 * - MIPS: uses uncached accesses in mode 2 on /dev/mem which also has a strongly ordered memory model.
	 * - ARM: uses a strongly ordered memory model for device memories.
	 *
	 * See also https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/memory-barriers.txt
	 */
	// cf. http://lxr.free-electrons.com/source/arch/powerpc/include/asm/barrier.h
#if defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || defined(__POWERPC__) || \
    defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || \
    defined(_ARCH_PPC64) || defined(__ppc)
	__asm__("eieio" : : : "memory");
#elif defined(__sparc__) || defined(__sparc)
#if defined(__sparc_v9__) || defined(__sparcv9)
	/* SPARC V9 CPUs support three different memory orderings that range from x86-like TSO to PowerPC-like
	 * RMO. The modes can be switched at runtime, so to make sure we maintain the right order of access we
	 * use the strongest hardware memory barrier that exists on SPARC V9. */
	__asm__ volatile ("membar #Sync" ::: "memory");
#elif defined(__sparc_v8__) || defined(__sparcv8)
	/* On SPARC V8 there is no RMO, just PSO, and that does not apply to I/O accesses... but if V8 code is
	 * run on V9 CPUs it might apply... or not... so we issue a write barrier anyway. That's the most
	 * suitable operation in the V8 instruction set anyway. If you know better then please tell us. */
	__asm__ volatile ("stbar");
#else
#error Unknown and/or unsupported SPARC instruction set version detected.
#endif
#endif
}
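
/*
 * A sketch of the hazard this guards against (REG_CMD, REG_GO and CMD_ERASE
 * are hypothetical): without the barrier that mmio_writeb() issues, the two
 * stores in
 *
 *	mmio_writeb(CMD_ERASE, base + REG_CMD);
 *	mmio_writeb(1, base + REG_GO);
 *
 * could be merged or reordered on a weakly ordered CPU, starting the command
 * before it is fully set up.
 */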

void mmio_writeb(uint8_t val, void *addr)
{
	*(volatile uint8_t *) addr = val;
	sync_primitive();
}

void mmio_writew(uint16_t val, void *addr)
{
	*(volatile uint16_t *) addr = val;
	sync_primitive();
}

void mmio_writel(uint32_t val, void *addr)
{
	*(volatile uint32_t *) addr = val;
	sync_primitive();
}

uint8_t mmio_readb(const void *addr)
{
	return *(volatile const uint8_t *) addr;
}

uint16_t mmio_readw(const void *addr)
{
	return *(volatile const uint16_t *) addr;
}

uint32_t mmio_readl(const void *addr)
{
	return *(volatile const uint32_t *) addr;
}

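/* Note: memcpy() may use arbitrary access widths and orders, so this is only
 * suitable for regions that tolerate such accesses; use mmio_readn_aligned()
 * below when the hardware requires fixed-width reads. */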
void mmio_readn(const void *addr, uint8_t *buf, size_t len)
{
	memcpy(buf, addr, len);
}

/* Read source-aligned to `align` bytes. `align` should be 4 or 8. */
void mmio_readn_aligned(const void *addr, uint8_t *dst, size_t len, size_t align)
{
	volatile const uint8_t *src = addr;

	/* align */
	for (; (uintptr_t)src % align && len > 0; --len, ++dst, ++src)
		*dst = *src;

	/* copy aligned */
	if (align == 4) {
		for (; len >= align; len -= align, dst += align, src += align)
			*(uint32_t *)dst = *(volatile const uint32_t *)src;
	} else if (align == 8) {
		for (; len >= align; len -= align, dst += align, src += align)
			*(uint64_t *)dst = *(volatile const uint64_t *)src;
	}

	/* residue */
	for (; len > 0; --len, ++dst, ++src)
		*dst = *src;
}
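
/*
 * A worked example (sketch): mmio_readn_aligned(src, dst, 0x26, 4) with
 * src % 4 == 2 copies two single head bytes to align src, then nine aligned
 * 32-bit words (36 bytes), leaving no residue bytes.
 */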

void mmio_le_writeb(uint8_t val, void *addr)
{
	mmio_writeb(cpu_to_le8(val), addr);
}

void mmio_le_writew(uint16_t val, void *addr)
{
	mmio_writew(cpu_to_le16(val), addr);
}

void mmio_le_writel(uint32_t val, void *addr)
{
	mmio_writel(cpu_to_le32(val), addr);
}

uint8_t mmio_le_readb(const void *addr)
{
	return le_to_cpu8(mmio_readb(addr));
}

uint16_t mmio_le_readw(const void *addr)
{
	return le_to_cpu16(mmio_readw(addr));
}

uint32_t mmio_le_readl(const void *addr)
{
	return le_to_cpu32(mmio_readl(addr));
}

enum mmio_write_type {
	mmio_write_type_b,
	mmio_write_type_w,
	mmio_write_type_l,
};

struct undo_mmio_write_data {
	void *addr;
	int reg;
	enum mmio_write_type type;
	union {
		uint8_t bdata;
		uint16_t wdata;
		uint32_t ldata;
	};
};

static int undo_mmio_write(void *p)
{
	struct undo_mmio_write_data *data = p;
	msg_pdbg("Restoring MMIO space at %p\n", data->addr);
	switch (data->type) {
	case mmio_write_type_b:
		mmio_writeb(data->bdata, data->addr);
		break;
	case mmio_write_type_w:
		mmio_writew(data->wdata, data->addr);
		break;
	case mmio_write_type_l:
		mmio_writel(data->ldata, data->addr);
		break;
	}
	/* p was allocated in register_undo_mmio_write. */
	free(p);
	return 0;
}

#define register_undo_mmio_write(a, c) \
do { \
	struct undo_mmio_write_data *undo_mmio_write_data; \
	undo_mmio_write_data = malloc(sizeof(*undo_mmio_write_data)); \
	if (!undo_mmio_write_data) { \
		msg_gerr("Out of memory!\n"); \
		exit(1); \
	} \
	undo_mmio_write_data->addr = a; \
	undo_mmio_write_data->type = mmio_write_type_##c; \
	undo_mmio_write_data->c##data = mmio_read##c(a); \
	register_shutdown(undo_mmio_write, undo_mmio_write_data); \
} while (0)

#define register_undo_mmio_writeb(a) register_undo_mmio_write(a, b)
#define register_undo_mmio_writew(a) register_undo_mmio_write(a, w)
#define register_undo_mmio_writel(a) register_undo_mmio_write(a, l)
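
/*
 * E.g. register_undo_mmio_writeb(addr) saves the byte currently at addr in a
 * malloc'd undo record and registers undo_mmio_write() as a shutdown
 * callback to write it back; the rmmio_write* wrappers below do exactly this
 * before performing the new write.
 */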

void rmmio_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_writeb(val, addr);
}

void rmmio_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_writew(val, addr);
}

void rmmio_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_writel(val, addr);
}

void rmmio_le_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_le_writeb(val, addr);
}

void rmmio_le_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_le_writew(val, addr);
}

void rmmio_le_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_le_writel(val, addr);
}

void rmmio_valb(void *addr)
{
	register_undo_mmio_writeb(addr);
}

void rmmio_valw(void *addr)
{
	register_undo_mmio_writew(addr);
}

void rmmio_vall(void *addr)
{
	register_undo_mmio_writel(addr);
}
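
/* The rmmio_val* helpers only record the current register content for
 * restore-on-shutdown, without writing a new value. */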