// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"

struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the kernel end (decompressed kernel) or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}
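/*
 * Note on the mechanism below: diag 0x260 with subcode 0x10 ("storage
 * configuration", loaded into register 4) asks the hypervisor to fill a
 * caller-provided table of storage extents, whose address and size are
 * passed in registers 2 and 3. The program check new PSW is temporarily
 * pointed at the local label 1f, so a machine that does not support the
 * diagnose simply falls through with rc != 0. On success the updated
 * contents of register 4 are returned, which diag260() takes as the
 * number of filled-in extents.
 */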
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	register unsigned long _rx1 asm("2") = rx1;
	register unsigned long _rx2 asm("3") = rx2;
	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
	int rc = -1;				    /* fail */
	unsigned long reg1, reg2;
	psw_t old = S390_lowcore.program_new_psw;

	asm volatile(
		"	epsw	%0,%1\n"
		"	st	%0,%[psw_pgm]\n"
		"	st	%1,%[psw_pgm]+4\n"
		"	larl	%0,1f\n"
		"	stg	%0,%[psw_pgm]+8\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: "=&d" (reg1), "=&a" (reg2),
		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
		  [rc] "+&d" (rc), [ry] "+d" (_ry)
		: [rx] "d" (_rx1), "d" (_rx2)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc == 0 ? _ry : -1;
}

static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

/*
 * Probe a real address with TPROT. A temporary program check handler at
 * label 1f catches the addressing exception raised for non-existent
 * storage, leaving rc at -EFAULT; for accessible storage rc becomes the
 * TPROT condition code.
 */
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}

/*
 * Binary search for the end of online memory: probe 1MB blocks with
 * tprot() up to 1 << MAX_PHYSMEM_BITS and record a single block
 * covering everything found accessible.
 */
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}

/*
 * Fill mem_detect, trying the most precise information source first:
 * SCLP storage info, then diag 0x260, then a single block derived from
 * SCLP read info, and finally the tprot() binary search.
 */
unsigned long detect_memory(void)
{
	unsigned long max_physmem_end;

	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return max_physmem_end;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return max_physmem_end;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return max_physmem_end;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	return get_mem_detect_end();
}
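/*
 * Usage sketch (illustrative, not part of this file): the blocks recorded
 * above are meant to be consumed through the helpers in asm/mem_detect.h,
 * e.g. by iterating them once the kernel proper takes over:
 *
 *	unsigned long start, end;
 *	int i;
 *
 *	for_each_mem_detect_block(i, &start, &end)
 *		// register the online range [start, end) with the allocator
 *
 * The for_each_mem_detect_block() helper is assumed to come from the
 * asm/mem_detect.h of the surrounding tree; this is a sketch, not code
 * that belongs in this compilation unit.
 */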