/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)    (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

        .text
        __HEAD
        .code64
        .globl startup_64
startup_64:
        /*
         * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
         * and someone has loaded an identity mapped page table
         * for us.  These identity mapped page tables map all of the
         * kernel pages and possibly all of memory.
         *
         * %rsi holds a physical pointer to real_mode_data.
         *
         * We come here either directly from a 64bit bootloader, or from
         * arch/x86/boot/compressed/head_64.S.
         *
         * We only come here initially at boot; nothing else comes here.
         *
         * Since we may be loaded at an address different from what we were
         * compiled to run at, we first fix up the physical addresses in our
         * page tables and then reload them.
         */

        /*
         * Set up the stack for verify_cpu(). "-8" because initial_stack is
         * defined this way, see below. Our best guess is a NULL ptr for stack
         * termination heuristics and we don't want to break anything which
         * might depend on it (kgdb, ...).
         */
        leaq    (__end_init_task - 8)(%rip), %rsp

        /* Sanitize CPU configuration */
        call    verify_cpu

        /*
         * Compute the delta between the address I am compiled to run at and
         * the address I am actually running at.
         */
        leaq    _text(%rip), %rbp
        subq    $_text - __START_KERNEL_map, %rbp

        /* Is the address not 2M aligned? */
        testl   $~PMD_PAGE_MASK, %ebp
        jnz     bad_address

        /* Is the address too large? */
        leaq    _text(%rip), %rax
        shrq    $MAX_PHYSMEM_BITS, %rax
        jnz     bad_address

        /* Fixup the physical addresses in the page table */
        addq    %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

        addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
        addq    %rbp, level3_kernel_pgt + (511*8)(%rip)

        addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)

        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */
        leaq    _text(%rip), %rdi
        leaq    early_level4_pgt(%rip), %rbx

        movq    %rdi, %rax
        shrq    $PGDIR_SHIFT, %rax
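
        /*
         * Note: the pages at early_level4_pgt + 4096 and + 8192 used below
         * are the first two early_dynamic_pgts pages, which are placed
         * immediately after early_level4_pgt in the __INITDATA section at
         * the end of this file.  They serve as the PUD and PMD levels of
         * this temporary identity mapping.
         */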
        leaq    (4096 + _KERNPG_TABLE)(%rbx), %rdx
        movq    %rdx, 0(%rbx,%rax,8)
        movq    %rdx, 8(%rbx,%rax,8)

        addq    $4096, %rdx
        movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
        andl    $(PTRS_PER_PUD-1), %eax
        movq    %rdx, 4096(%rbx,%rax,8)
        incl    %eax
        andl    $(PTRS_PER_PUD-1), %eax
        movq    %rdx, 4096(%rbx,%rax,8)

        addq    $8192, %rbx
        movq    %rdi, %rax
        shrq    $PMD_SHIFT, %rdi
        addq    $(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
        leaq    (_end - 1)(%rip), %rcx
        shrq    $PMD_SHIFT, %rcx
        subq    %rdi, %rcx
        incl    %ecx

1:
        andq    $(PTRS_PER_PMD - 1), %rdi
        movq    %rax, (%rbx,%rdi,8)
        incq    %rdi
        addq    $PMD_SIZE, %rax
        decl    %ecx
        jnz     1b

        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds when the kernel is relocated;
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         */
        leaq    level2_kernel_pgt(%rip), %rdi
        leaq    4096(%rdi), %r8
        /* See if it is a valid page table entry */
1:      testb   $1, 0(%rdi)
        jz      2f
        addq    %rbp, 0(%rdi)
        /* Go to the next page */
2:      addq    $8, %rdi
        cmp     %r8, %rdi
        jne     1b

        /* Fixup phys_base */
        addq    %rbp, phys_base(%rip)

        movq    $(early_level4_pgt - __START_KERNEL_map), %rax
        jmp     1f

ENTRY(secondary_startup_64)
        /*
         * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
         * and someone has loaded a mapped page table.
         *
         * %rsi holds a physical pointer to real_mode_data.
         *
         * We come here either from startup_64 (using physical addresses)
         * or from trampoline.S (using virtual addresses).
         *
         * Using virtual addresses from trampoline.S removes the need
         * to have any identity mapped pages in the kernel page table
         * after the boot processor executes this code.
         */

        /* Sanitize CPU configuration */
        call    verify_cpu

        movq    $(init_level4_pgt - __START_KERNEL_map), %rax
1:

        /* Enable PAE mode and PGE */
        movl    $(X86_CR4_PAE | X86_CR4_PGE), %ecx
        movq    %rcx, %cr4
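
        /*
         * At this point %rax holds the offset of the top-level page table
         * relative to __START_KERNEL_map: early_level4_pgt when entered via
         * startup_64 above, init_level4_pgt when entered via
         * secondary_startup_64.  Adding phys_base below turns that offset
         * into the table's physical address, even when the kernel has been
         * relocated.
         */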
        /* Setup early boot stage 4 level pagetables. */
        addq    phys_base(%rip), %rax
        movq    %rax, %cr3

        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
        jmp     *%rax
1:

        /* Check if nx is implemented */
        movl    $0x80000001, %eax
        cpuid
        movl    %edx,%edi

        /* Setup EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_SCE, %eax        /* Enable System Call */
        btl     $20,%edi                /* No Execute supported? */
        jnc     1f
        btsl    $_EFER_NX, %eax
        btsq    $_PAGE_BIT_NX,early_pmd_flags(%rip)
1:      wrmsr                           /* Make changes effective */

        /* Setup cr0 */
#define CR0_STATE       (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
                         X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
                         X86_CR0_PG)
        movl    $CR0_STATE, %eax
        /* Make changes effective */
        movq    %rax, %cr0

        /* Setup a boot time stack */
        movq    initial_stack(%rip), %rsp

        /* zero EFLAGS after setting rsp */
        pushq   $0
        popfq

        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses we're currently running on. We have to do that here
         * because in 32bit we couldn't load a 64bit linear address.
         */
        lgdt    early_gdt_descr(%rip)

        /* set up data segments */
        xorl    %eax,%eax
        movl    %eax,%ds
        movl    %eax,%ss
        movl    %eax,%es

        /*
         * We don't really need to load %fs or %gs, but load them anyway
         * to kill any stale realmode selectors.  This allows execution
         * under VT hardware.
         */
        movl    %eax,%fs
        movl    %eax,%gs

        /*
         * Set up %gs.
         *
         * The base of %gs always points to the bottom of the irqstack
         * union.  If the stack protector canary is enabled, it is
         * located at %gs:40.  Note that, on SMP, the boot cpu uses the
         * init data section until the per cpu areas are set up.
         */
        movl    $MSR_GS_BASE,%ecx
        movl    initial_gs(%rip),%eax
        movl    initial_gs+4(%rip),%edx
        wrmsr

        /* rsi is a pointer to the real mode structure with interesting
           info; pass it to C */
        movq    %rsi, %rdi

        /*
         * Finally jump to run C code and to be on a real kernel address.
         * Since we are running on identity-mapped space we have to jump
         * to the full 64bit address; this is only possible as an indirect
         * jump.  In addition we need to ensure %cs is set so we make this
         * a far return.
         *
         * Note: do not change to far jump indirect with 64bit offset.
         *
         * AMD does not support far jump indirect with 64bit offset.
         * AMD64 Architecture Programmer's Manual, Volume 3, states only:
         *      JMP FAR mem16:16 FF /5 Far jump indirect,
         *              with the target specified by a far pointer in memory.
         *      JMP FAR mem16:32 FF /5 Far jump indirect,
         *              with the target specified by a far pointer in memory.
         *
         * Intel64 does support 64bit offset.
         * Software Developer Manual Vol 2 states:
         *      FF /5 JMP m16:16 Jump far, absolute indirect,
         *              address given in m16:16
         *      FF /5 JMP m16:32 Jump far, absolute indirect,
         *              address given in m16:32.
         *      REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
         *              address given in m16:64.
         */
        movq    initial_code(%rip),%rax
        pushq   $0              # fake return address to stop unwinder
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
        lretq
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
        movq    initial_stack(%rip),%rsp
        movq    initial_code(%rip),%rax
        pushq   $0              # fake return address to stop unwinder
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
        lretq
ENDPROC(start_cpu0)
#endif

        /* Both SMP bootup and ACPI suspend change these variables */
        __REFDATA
        .balign 8
        GLOBAL(initial_code)
        .quad   x86_64_start_kernel
        GLOBAL(initial_gs)
        .quad   INIT_PER_CPU_VAR(irq_stack_union)
        GLOBAL(initial_stack)
        .quad   init_thread_union + THREAD_SIZE - 8
        __FINITDATA

bad_address:
        jmp     bad_address

        __INIT
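        /*
         * Each stub in the array below pushes a dummy error code (for
         * vectors where the CPU does not supply one) plus the vector
         * number, then jumps to the common handler.  The .fill directive
         * pads every stub to exactly EARLY_IDT_HANDLER_SIZE bytes (with
         * 0xcc, int3), so the early IDT setup code can find stub N at
         * early_idt_handler_array + N * EARLY_IDT_HANDLER_SIZE.
         */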
ENTRY(early_idt_handler_array)
        # 104(%rsp) %rflags
        #  96(%rsp) %cs
        #  88(%rsp) %rip
        #  80(%rsp) error code
        i = 0
        .rept NUM_EXCEPTION_VECTORS
        .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
        pushq $0        # Dummy error code, to make stack frame uniform
        .endif
        pushq $i        # 72(%rsp) Vector number
        jmp early_idt_handler_common
        i = i + 1
        .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
        /*
         * The stack is the hardware frame, an error code or zero, and the
         * vector number.
         */
        cld

        incl    early_recursion_flag(%rip)

        /* The vector number is currently in the pt_regs->di slot. */
        pushq   %rsi                            /* pt_regs->si */
        movq    8(%rsp), %rsi                   /* RSI = vector number */
        movq    %rdi, 8(%rsp)                   /* pt_regs->di = RDI */
        pushq   %rdx                            /* pt_regs->dx */
        pushq   %rcx                            /* pt_regs->cx */
        pushq   %rax                            /* pt_regs->ax */
        pushq   %r8                             /* pt_regs->r8 */
        pushq   %r9                             /* pt_regs->r9 */
        pushq   %r10                            /* pt_regs->r10 */
        pushq   %r11                            /* pt_regs->r11 */
        pushq   %rbx                            /* pt_regs->bx */
        pushq   %rbp                            /* pt_regs->bp */
        pushq   %r12                            /* pt_regs->r12 */
        pushq   %r13                            /* pt_regs->r13 */
        pushq   %r14                            /* pt_regs->r14 */
        pushq   %r15                            /* pt_regs->r15 */

        cmpq    $14,%rsi        /* Page fault? */
        jnz     10f
        GET_CR2_INTO(%rdi)      /* Can clobber any volatile register if pv */
        call    early_make_pgtable
        andl    %eax,%eax
        jz      20f             /* All good */

10:
        movq    %rsp,%rdi       /* RDI = pt_regs; RSI is already trapnr */
        call    early_fixup_exception

20:
        decl    early_recursion_flag(%rip)
        jmp     restore_regs_and_iret
ENDPROC(early_idt_handler_common)

        __INITDATA

        .balign 4
GLOBAL(early_recursion_flag)
        .long 0

#define NEXT_PAGE(name) \
        .balign PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)                        \
        i = 0 ;                                         \
        .rept (COUNT) ;                                 \
        .quad   (START) + (i << PMD_SHIFT) + (PERM) ;   \
        i = i + 1 ;                                     \
        .endr
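
/*
 * For illustration: with 2MB pages (PMD_SHIFT == 21), PMDS(0, PERM, 3)
 * expands to three consecutive pmd entries,
 *
 *      .quad   0x0000000 + (PERM)      (maps 0MB - 2MB)
 *      .quad   0x0200000 + (PERM)      (maps 2MB - 4MB)
 *      .quad   0x0400000 + (PERM)      (maps 4MB - 6MB)
 *
 * which is how level2_ident_pgt and level2_kernel_pgt below get their
 * 1:1 large-page mappings.
 */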
        __INITDATA
NEXT_PAGE(early_level4_pgt)
        .fill   511,8,0
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
        .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0

        .data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
        .fill   512,8,0
#else
NEXT_PAGE(init_level4_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
        .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .fill   511, 8, 0
NEXT_PAGE(level2_ident_pgt)
        /*
         * Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         */
        PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
        .fill   L3_START_KERNEL,8,0
        /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
        .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
        /*
         * 512 MB kernel mapping. We spend a full page on this pagetable
         * anyway.
         *
         * The kernel code+data+bss must not be bigger than that.
         *
         * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
         *  If you want to increase this then increase MODULES_VADDR
         *  too.)
         */
        PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
        .fill   506,8,0
        .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
        /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
        .fill   5,8,0

NEXT_PAGE(level1_fixmap_pgt)
        .fill   512,8,0

#undef PMDS

        .data
        .align 16
        .globl early_gdt_descr
early_gdt_descr:
        .word   GDT_ENTRIES*8-1
early_gdt_descr_base:
        .quad   INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

        __PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
        .skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)