/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/processor.h
 *
 *  Copyright (C) 1995-1999 Russell King
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
#include <asm/vdso/processor.h>

#ifdef __KERNEL__
/*
 * Top of the user stack: a 26-bit-personality task is limited to the
 * 26-bit address space, everything else gets the full TASK_SIZE.
 */
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

/* Per-thread hardware-breakpoint state; empty when HW breakpoints are off. */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
							/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
							/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  { }

/*
 * Set up user registers for a freshly exec'd task: zero all registers,
 * then (for FDPIC binaries) restore the loadmap/dynamic pointers that
 * the binfmt loader stashed in r7-r9 and point r10 at the data segment.
 * CPSR is chosen from the task's personality (26- vs 32-bit user mode),
 * with the Thumb bit set when the entry point is odd on a Thumb-capable
 * CPU.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

unsigned long __get_wchan(struct task_struct *p);

/* Saved user registers sit at the very top of the kernel stack page. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
/*
 * Emit the SMP form of an instruction and record its location in the
 * .alt.smp.init section so it can be patched to the UP form when the
 * kernel finds itself running on a uniprocessor system.
 */
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.align	2\n"						\
	"	.long	9998b - .\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
/* PLDW (prefetch-for-write) needs the MP extensions; fall back to PLD on UP. */
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			"pldw\t%a0",
			"pld\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#endif

#endif /* __ASM_ARM_PROCESSOR_H */