/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	"ARM\x64"			// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

/*
 * The following callee saved general purpose registers are used on the
 * primary lowlevel boot path:
 *
 *  Register   Scope                       Purpose
 *  x21        stext() .. start_kernel()   FDT pointer passed at boot in x0
 *  x23        stext() .. start_kernel()   physical misalignment/KASLR offset
 *  x28        __create_page_tables()      callee preserved temp register
 *  x19/x20    __primary_switch()          callee preserved temp registers
 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
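	 * (__cpu_setup leaves the SCTLR_EL1 value to use in x0; __enable_mmu,
	 * reached later via __primary_switch, is what actually writes it once
	 * the page tables are in place.)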
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm

/*
 * Macro to populate page table entries, these entries can be pointers to the next level
 * or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *			  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
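 * The page-table pages themselves are taken from the memory immediately
 * following tbl: rtbl starts at tbl + PAGE_SIZE and advances by PAGE_SIZE
 * for every next-level entry written.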
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, vend, flags
 * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the idmap and swapper page tables.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
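	 * Each entry of that extra level covers 2^EXTRA_SHIFT bytes of VA,
	 * i.e. exactly the range spanned by one fully-populated table at the
	 * existing top level (PGDIR_SHIFT + PAGE_SHIFT - 3 bits).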
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
	dmb	sy
	bl	__inval_dcache_area

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present.
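	 * (ID_AA64MMFR1_EL1.VH, bits [11:8], reads as non-zero when VHE is
	 * implemented; the ubfx below extracts that field into x2.)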
	 * For the rest of the EL2 setup, x2 being non-zero indicates that we
	 * do have VHE, and that the kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	cbz	x2, set_hcr
	mov_q	x0, HCR_HOST_VHE_FLAGS
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cbz	x0, 3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x1, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #32, #4			// Check ID_AA64DFR0_EL1 PMSVer
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:	msr	mdcr_el2, x3			// Configure debug traps

	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	msr_s	SYS_LORC_EL1, xzr
1:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
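	/*
	 * Synchronise the CPTR_EL2 write before touching ZCR_EL2 below;
	 * without the ISB the ZCR_EL2 access could still trap to EL2.
	 */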
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long	0

	.popsection

/*
 * This provides a "holding pen" for platforms in which all secondary
 * cores are held until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	msr	sp_el0, x2
	mov	x29, #0
	mov	x30, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status tmp, status
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
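 * (The check reads the ID_AA64MMFR0_EL1.TGran field for the configured
 * PAGE_SIZE, selected via ID_AA64MMFR0_TGRAN_SHIFT.)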
 * If it isn't, park the CPU
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	phys_to_ttbr x3, x1
	phys_to_ttbr x4, x2
	msr	ttbr0_el1, x3			// load TTBR0
	msr	ttbr1_el1, x4			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)

ENTRY(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_52BIT_VA
	ldr_l	x0, vabits_user
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	adr_l	x0, va52mismatch
	mov	w1, #1
	strb	w1, [x0]
	dmb	sy
	dc	ivac, x0			// Invalidate potentially stale cache line

	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
ENDPROC(__cpu_secondary_check52bitva)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b
1:	ret
ENDPROC(__relocate_kernel)
#endif

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)