Loading...
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>

/* Header of the real-mode trampoline blob once it has been placed in low memory. */
struct real_mode_header *real_mode_header;
/* Points into the trampoline header; the CR4 value APs load during bring-up. */
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

/*
 * Switch the current CPU onto the trampoline page tables, e.g. ahead of
 * dropping back to real mode (the trampoline PGD identity-maps the stub).
 */
void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
	load_cr3(initial_page_table);
#else
	/*
	 * This function is called before exiting to real-mode and that will
	 * fail with CR4.PCIDE still set.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4_clear_bits(X86_CR4_PCIDE);

	write_cr3(real_mode_header->trampoline_pgd);
#endif

	/*
	 * The CR3 write above will not flush global TLB entries.
	 * Stale, global entries from previous page tables may still be
	 * present. Flush those stale entries.
	 *
	 * This ensures that memory accessed while running with
	 * trampoline_pgd is *actually* mapped into trampoline_pgd.
	 */
	__flush_tlb_all();
}

/*
 * Reserve low memory for the real-mode trampoline.  Called very early in
 * boot, before the slab allocator is up (hence the WARN_ON), so only
 * memblock may be used here.
 */
void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
	if (!mem)
		pr_info("No sub-1M memory is available for the trampoline\n");
	else
		set_real_mode_mem(mem);

	/*
	 * Unconditionally reserve the entire first 1M, see comment in
	 * setup_arch().
	 */
	memblock_reserve(0, SZ_1M);
}

/*
 * Apply AMD memory-encryption (SME/SEV-ES) adjustments to the trampoline
 * header.  No-op unless CONFIG_AMD_MEM_ENCRYPT is enabled.
 */
static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Tell the trampoline that host memory encryption (SME) is on. */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
		 */
		th->start = (u64) secondary_startup_64_no_verify;

		if (sev_es_setup_ap_jump_table(real_mode_header))
			panic("Failed to get/update SEV-ES AP Jump Table");
	}
#endif
}

/*
 * Copy the real-mode blob into its reserved low-memory slot, apply its
 * relocations, and fill in the trampoline header (entry point, EFER/CR4
 * values and page tables) used to start APs.
 */
static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
	int i;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	phys_base = __pa(base);
	/* Real-mode segment = physical address >> 4. */
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;

	/*
	 * Include the entirety of the kernel mapping into the trampoline
	 * PGD.  This way, all mappings present in the normal kernel page
	 * tables are usable while running on trampoline_pgd.
	 */
	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
		trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also trampoline code will be executed by APs so we
 * need to mark it executable at do_pre_smp_initcalls() at least,
 * thus run it as an early_initcall().
*/ static void __init set_real_mode_permissions(void) { unsigned char *base = (unsigned char *) real_mode_header; size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); size_t ro_size = PAGE_ALIGN(real_mode_header->ro_end) - __pa(base); size_t text_size = PAGE_ALIGN(real_mode_header->ro_end) - real_mode_header->text_start; unsigned long text_start = (unsigned long) __va(real_mode_header->text_start); set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); } void __init init_real_mode(void) { if (!real_mode_header) panic("Real mode trampoline was not allocated"); setup_real_mode(); set_real_mode_permissions(); } static int __init do_init_real_mode(void) { x86_platform.realmode_init(); return 0; } early_initcall(do_init_real_mode); |