/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */

#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME
	TRACE_IRQS_ON_RELOAD
	STI
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments. Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 */

	.set	push
	.set	noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	.section __ex_table,"a"
	PTR_WD	load_a4, bad_stack_a4
	PTR_WD	load_a5, bad_stack_a5
	PTR_WD	load_a6, bad_stack_a6
	PTR_WD	load_a7, bad_stack_a7
	.previous

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry	# -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall

	sll	t0, v0, 2
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall

	jalr	t2			# Do The Real Thing (TM)
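
	/*
	 * Note: the o32 ABI flags an error by setting a3 (saved as PT_R7)
	 * and returning the positive errno in v0.  The sequence below treats
	 * return values in the range [-EMAXERRNO, -1] as errors, negates v0
	 * to the positive errno, and keeps the original syscall number in
	 * PT_R0 so the syscall can be restarted.
	 */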
	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0, __NR_O32_Linux
	bnez	t2, 1f /* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more drastic.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

bad_stack_a4:
	li	t5, 0
	b	load_a5

bad_stack_a5:
	li	t6, 0
	b	load_a6

bad_stack_a6:
	li	t7, 0
	b	load_a7

bad_stack_a7:
	li	t8, 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS		# error
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval
	lw	t2, sys_call_table(t1)	# syscall routine

	move	a0, a1			# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2
	/* Unreached */

einval:	li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c. Considered only temporary we only support
	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
	 * atm.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry)	PTR_WD entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>
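
/*
 * Note: with __SYSCALL defined above to emit a PTR_WD entry, each
 * __SYSCALL(nr, entry) line in the generated <asm/syscall_table_o32.h>
 * contributes one function pointer, so sys_call_table ends up as an array
 * that handle_sys indexes by (syscall number - __NR_O32_Linux).
 */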