/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/proc-fns.h
 *
 * Copyright (C) 1997-1999 Russell King
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 */
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H

#ifdef __KERNEL__

#include <asm/glue-proc.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

struct mm_struct;

/*
 * Don't change this structure - ASM code relies on it.
 */
struct processor {
	/* MISC
	 * get data abort address/flags
	 */
	void (*_data_abort)(unsigned long pc);
	/*
	 * Retrieve prefetch fault address
	 */
	unsigned long (*_prefetch_abort)(unsigned long lr);
	/*
	 * Set up any processor specifics
	 */
	void (*_proc_init)(void);
	/*
	 * Check for processor bugs
	 */
	void (*check_bugs)(void);
	/*
	 * Disable any processor specifics
	 */
	void (*_proc_fin)(void);
	/*
	 * Special stuff for a reset
	 */
	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
	/*
	 * Idle the processor
	 */
	int (*_do_idle)(void);
	/*
	 * Processor architecture specific
	 */
	/*
	 * clean a virtual address range from the
	 * D-cache without flushing the cache.
	 */
	void (*dcache_clean_area)(void *addr, int size);
	/*
	 * Set the page table
	 */
	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
	/*
	 * Set a possibly extended PTE.  Non-extended PTEs should
	 * ignore 'ext'.
	 */
#ifdef CONFIG_ARM_LPAE
	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
#else
	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
#endif

	/* Suspend/resume */
	unsigned int suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
};
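/*
 * Illustrative sketch only, not part of this header: each CPU back-end
 * (the glue code generated from arch/arm/mm/proc-*.S) exports one such
 * vtable, which boot code installs via init_proc_vtable().  The names
 * below are hypothetical stand-ins for the real glue symbols:
 *
 *	static const struct processor example_processor_functions = {
 *		._data_abort		= example_data_abort,
 *		._prefetch_abort	= example_prefetch_abort,
 *		._proc_init		= example_proc_init,
 *		._proc_fin		= example_proc_fin,
 *		.reset			= example_reset,
 *		._do_idle		= example_do_idle,
 *		.dcache_clean_area	= example_dcache_clean_area,
 *		.switch_mm		= example_switch_mm,
 *		.set_pte_ext		= example_set_pte_ext,
 *	};
 *
 *	init_proc_vtable(&example_processor_functions);
 */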
#ifndef MULTI_CPU
static inline void init_proc_vtable(const struct processor *p)
{
}

extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));

/* These two are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);

#else

extern struct processor processor;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
#include <linux/smp.h>
/*
 * This can't be a per-cpu variable because we need to access it before
 * per-cpu has been initialised.  We have a couple of functions that are
 * called in a pre-emptible context, and so can't use smp_processor_id()
 * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
 * function pointers for these are identical across all CPUs.
 */
extern struct processor *cpu_vtable[];
#define PROC_VTABLE(f)		cpu_vtable[smp_processor_id()]->f
#define PROC_TABLE(f)		cpu_vtable[0]->f
static inline void init_proc_vtable(const struct processor *p)
{
	unsigned int cpu = smp_processor_id();
	*cpu_vtable[cpu] = *p;
	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
		     cpu_vtable[0]->dcache_clean_area);
	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
		     cpu_vtable[0]->set_pte_ext);
}
#else
#define PROC_VTABLE(f)		processor.f
#define PROC_TABLE(f)		processor.f
static inline void init_proc_vtable(const struct processor *p)
{
	processor = *p;
}
#endif

#define cpu_proc_init		PROC_VTABLE(_proc_init)
#define cpu_check_bugs		PROC_VTABLE(check_bugs)
#define cpu_proc_fin		PROC_VTABLE(_proc_fin)
#define cpu_reset		PROC_VTABLE(reset)
#define cpu_do_idle		PROC_VTABLE(_do_idle)
#define cpu_dcache_clean_area	PROC_TABLE(dcache_clean_area)
#define cpu_set_pte_ext		PROC_TABLE(set_pte_ext)
#define cpu_do_switch_mm	PROC_VTABLE(switch_mm)

/* These two are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend		PROC_VTABLE(do_suspend)
#define cpu_do_resume		PROC_VTABLE(do_resume)
#endif

extern void cpu_resume(void);

#include <asm/memory.h>

#ifdef CONFIG_MMU

#define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)

#ifdef CONFIG_ARM_LPAE

#define cpu_get_ttbr(nr)					\
	({							\
		u64 ttbr;					\
		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
			: "=r" (ttbr));				\
		ttbr;						\
	})

#define cpu_get_pgd()						\
	({							\
		u64 pg = cpu_get_ttbr(0);			\
		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);		\
		(pgd_t *)phys_to_virt(pg);			\
	})
#else
#define cpu_get_pgd()						\
	({							\
		unsigned long pg;				\
		__asm__("mrc	p15, 0, %0, c2, c0, 0"		\
			 : "=r" (pg) : : "cc");			\
		pg &= ~0x3fff;					\
		(pgd_t *)phys_to_virt(pg);			\
	})
#endif

#else	/* !CONFIG_MMU */

#define cpu_switch_mm(pgd,mm)	{ }

#endif

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
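/*
 * Example usage, a minimal sketch rather than a definition from this
 * header: arch code installs a new translation table with
 * cpu_switch_mm() and can read the active pgd back via cpu_get_pgd(),
 * which masks the TTBR down to the pgd's alignment and converts it back
 * to a virtual address.  The BUG_ON() is purely for illustration:
 *
 *	struct mm_struct *mm = current->active_mm;
 *
 *	cpu_switch_mm(mm->pgd, mm);
 *	BUG_ON(cpu_get_pgd() != mm->pgd);
 */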