// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>

#include <mm/mmu_decl.h>

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	int i;
	unsigned long zpr;

	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators. We use the 4 upper bits of virtual address to select
	 * the zone. We set all zones above TASK_SIZE to zero, allowing
	 * only kernel access as indicated in the PTE. For zones below
	 * TASK_SIZE, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	for (i = 0, zpr = 0; i < TASK_SIZE >> 28; i++)
		zpr |= 1 << (30 - i * 2);

	mtspr(SPRN_ZPR, zpr);

	flush_instruction_cache();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */
	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */
	mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
	mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
}
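/*
 * Worked example (editor's illustration, not part of the original file):
 * assuming the usual 40x TASK_SIZE of 0x80000000, TASK_SIZE >> 28 is 8,
 * so the loop above sets bits 30, 28, ..., 16 and programs
 * ZPR = 0x55550000. Zones 0-7 (user space) then hold 0b01, honouring the
 * PTE permissions for both user and kernel accesses, while zones 8-15
 * stay 0b00, restricting them to kernel accesses per the PTE.
 */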
#define LARGE_PAGE_SIZE_16M	(1<<24)
#define LARGE_PAGE_SIZE_4M	(1<<22)

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long v, s, mapped;
	phys_addr_t p;

	v = KERNELBASE;
	p = 0;
	s = total_lowmem;

	if (IS_ENABLED(CONFIG_KFENCE))
		return 0;

	if (debug_pagealloc_enabled())
		return 0;

	if (strict_kernel_rwx_enabled())
		return 0;

	while (s >= LARGE_PAGE_SIZE_16M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;

		pmdp = pmd_off_k(v);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);

		v += LARGE_PAGE_SIZE_16M;
		p += LARGE_PAGE_SIZE_16M;
		s -= LARGE_PAGE_SIZE_16M;
	}

	while (s >= LARGE_PAGE_SIZE_4M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;

		pmdp = pmd_off_k(v);
		*pmdp = __pmd(val);

		v += LARGE_PAGE_SIZE_4M;
		p += LARGE_PAGE_SIZE_4M;
		s -= LARGE_PAGE_SIZE_4M;
	}

	mapped = total_lowmem - s;

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 16 and 4 MiB
	 * pages. Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	memblock_set_current_limit(mapped);

	return mapped;
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 40x can only access 16MB at the moment (see head_40x.S) */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}
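/*
 * Worked example (editor's illustration, not part of the original file):
 * if total_lowmem were 70 MiB, mmu_mapin_ram() above would install four
 * 16 MiB mappings (writing each into four consecutive pmd entries, one
 * per 4 MiB of coverage on 40x) plus one 4 MiB mapping, report 68 MiB
 * as mapped, and cap memblock there; the remaining 2 MiB tail is
 * expected to be covered later with normal-sized pages.
 */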