/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
 */

#include <linux/cfi_types.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>

        .code64
        .section .text, "ax"

#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE      (8+16*2)
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE      0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE         (FRAME_SIZE + MCOUNT_FRAME_SIZE)

/*
 * The gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to 'fentry' and not 'mcount',
 * and is made before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code and restored before returning to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, the size of the pt_regs structure will be
 * allocated on the stack and the required mcount registers will
 * be saved in the locations that pt_regs has them in.
 */
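/*
 * Purely illustrative sketch (not part of this file's code): with
 * -mfentry, a traced function <my_func> (hypothetical name) begins with
 *
 *	<my_func>:
 *		call __fentry__		# 5-byte call, turned into a NOP on
 *					# boot; re-patched to call
 *					# ftrace_caller or ftrace_regs_caller
 *					# when tracing is enabled
 *		push %rbp
 *		...
 */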
/*
 * @added: the amount of stack added before calling this
 *
 * After this is called, the following registers contain:
 *
 *  %rdi - holds the address that called the trampoline
 *  %rsi - holds the parent function (traced function's return address)
 *  %rdx - holds the original %rbp
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
        /* Save the original rbp */
        pushq %rbp

        /*
         * Stack traces will stop at the ftrace trampoline if the frame pointer
         * is not set up properly. If fentry is used, we need to save a frame
         * pointer for the parent as well as the function traced, because the
         * fentry is called before the stack frame is set up, whereas mcount
         * is called afterward.
         */

        /* Save the parent pointer (skip orig rbp and our return address) */
        pushq \added+8*2(%rsp)
        pushq %rbp
        movq %rsp, %rbp
        /* Save the return address (now skip orig rbp, rbp and parent) */
        pushq \added+8*3(%rsp)
        pushq %rbp
        movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

        /*
         * We add enough stack to save all regs.
         */
        subq $(FRAME_SIZE), %rsp
        movq %rax, RAX(%rsp)
        movq %rcx, RCX(%rsp)
        movq %rdx, RDX(%rsp)
        movq %rsi, RSI(%rsp)
        movq %rdi, RDI(%rsp)
        movq %r8, R8(%rsp)
        movq %r9, R9(%rsp)
        movq $0, ORIG_RAX(%rsp)
        /*
         * Save the original RBP. Even though the mcount ABI does not
         * require this, it helps out callers.
         */
#ifdef CONFIG_FRAME_POINTER
        movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
        movq %rbp, %rdx
#endif
        movq %rdx, RBP(%rsp)

        /* Copy the parent address into %rsi (second parameter) */
        movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi

        /* Move RIP to its proper location */
        movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
        movq %rdi, RIP(%rsp)

        /*
         * Now %rdi (the first parameter) has the return address of
         * where ftrace_call returns. But the callbacks expect the
         * address of the call itself.
         */
        subq $MCOUNT_INSN_SIZE, %rdi
.endm
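/*
 * Illustrative stack layout right after save_mcount_regs, derived from
 * the macro above (assumes CONFIG_FRAME_POINTER=y; highest address first):
 *
 *	parent's return address			MCOUNT_REG_SIZE+8+\added(%rsp)
 *	return address into the traced func	MCOUNT_REG_SIZE+\added(%rsp)
 *	[\added bytes, e.g. saved flags]
 *	original %rbp				MCOUNT_REG_SIZE-8(%rsp)
 *	copy of parent's return address		] fake frame for the
 *	saved %rbp				] parent function
 *	copy of func's return address		] fake frame for the
 *	saved %rbp				] traced function
 *	pt_regs (FRAME_SIZE bytes)		(%rsp)
 */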
.macro restore_mcount_regs save=0

        /* ftrace_regs_caller or frame pointers require this */
        movq RBP(%rsp), %rbp

        movq R9(%rsp), %r9
        movq R8(%rsp), %r8
        movq RDI(%rsp), %rdi
        movq RSI(%rsp), %rsi
        movq RDX(%rsp), %rdx
        movq RCX(%rsp), %rcx
        movq RAX(%rsp), %rax

        addq $MCOUNT_REG_SIZE-\save, %rsp

.endm

SYM_TYPED_FUNC_START(ftrace_stub)
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(ftrace_stub)

SYM_TYPED_FUNC_START(ftrace_stub_graph)
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(ftrace_stub_graph)

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)

SYM_FUNC_START(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

        CALL_DEPTH_ACCOUNT

        /* Stack - skipping return address of ftrace_caller */
        leaq MCOUNT_REG_SIZE+8(%rsp), %rcx
        movq %rcx, RSP(%rsp)

SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

        /* Only ops with REGS flag set should have CS register set */
        movq $0, CS(%rsp)

        /* Account for the function call below */
        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        call ftrace_stub

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        restore_mcount_regs

        /*
         * The code up to this label is copied into trampolines so
         * think twice before adding any new code or changing the
         * layout here.
         */
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        RET
SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
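/*
 * For reference, a sketch of the C-side callback that the patched call
 * site above ends up invoking; the register setup follows the SysV
 * calling convention for an ftrace_func_t (see <linux/ftrace.h>):
 *
 *	void callback(unsigned long ip, unsigned long parent_ip,
 *		      struct ftrace_ops *op, struct ftrace_regs *fregs);
 *
 * %rdi = ip (address of the fentry call in the traced function),
 * %rsi = parent_ip, %rdx = op, %rcx = fregs.
 */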
SYM_FUNC_START(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
        pushfq

        /* added 8 bytes to save flags */
        save_mcount_regs 8
        /* save_mcount_regs fills in first two parameters */

        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* Save the rest of pt_regs */
        movq %r15, R15(%rsp)
        movq %r14, R14(%rsp)
        movq %r13, R13(%rsp)
        movq %r12, R12(%rsp)
        movq %r11, R11(%rsp)
        movq %r10, R10(%rsp)
        movq %rbx, RBX(%rsp)
        /* Copy saved flags */
        movq MCOUNT_REG_SIZE(%rsp), %rcx
        movq %rcx, EFLAGS(%rsp)
        /* Kernel segments */
        movq $__KERNEL_DS, %rcx
        movq %rcx, SS(%rsp)
        movq $__KERNEL_CS, %rcx
        movq %rcx, CS(%rsp)
        /* Stack - skipping return address and flags */
        leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
        movq %rcx, RSP(%rsp)

        ENCODE_FRAME_POINTER

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

        /* Account for the function call below */
        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        call ftrace_stub

        /* Copy flags back to SS, to restore them */
        movq EFLAGS(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE+8(%rsp)

        /* restore the rest of pt_regs */
        movq R15(%rsp), %r15
        movq R14(%rsp), %r14
        movq R13(%rsp), %r13
        movq R12(%rsp), %r12
        movq R10(%rsp), %r10
        movq RBX(%rsp), %rbx

        movq ORIG_RAX(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE-8(%rsp)

        /*
         * If ORIG_RAX is anything but zero, make this a call to that
         * address. See arch_ftrace_set_direct_caller().
         */
        testq %rax, %rax
SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        jnz 1f

        restore_mcount_regs
        /* Restore flags */
        popfq

        /*
         * The trampoline will add the return.
         */
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        RET

        /* Swap the flags with orig_rax */
1:      movq MCOUNT_REG_SIZE(%rsp), %rdi
        movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        restore_mcount_regs 8
        /* Restore flags */
        popfq
        UNWIND_HINT_FUNC

        /*
         * The above left an extra return value on the stack; effectively
         * doing a tail-call without using a register. This PUSH;RET
         * pattern unbalances the RSB; inject a pointless CALL to rebalance.
         */
        ANNOTATE_INTRA_FUNCTION_CALL
        CALL .Ldo_rebalance
        int3
.Ldo_rebalance:
        add $8, %rsp
        ALTERNATIVE __stringify(RET), \
                    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
                    X86_FEATURE_CALL_DEPTH

SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

SYM_FUNC_START(__fentry__)
        CALL_DEPTH_ACCOUNT

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
        RET

trace:
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

        /*
         * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
         * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
         * ip and parent ip are used and the list function is called when
         * function tracing is enabled.
         */
        movq ftrace_trace_function, %r8
        CALL_NOSPEC r8
        restore_mcount_regs

        jmp ftrace_stub
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
STACK_FRAME_NON_STANDARD_FP(__fentry__)

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(return_to_handler)
        UNWIND_HINT_EMPTY
        ANNOTATE_NOENDBR
        subq $16, %rsp

        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
        movq %rbp, %rdi

        call ftrace_return_to_handler

        movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax

        addq $16, %rsp
        /*
         * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
         * since IBT would demand that it contain ENDBR, which simply isn't so
         * for return addresses. Use a retpoline here to keep the RSB balanced.
         */
        ANNOTATE_INTRA_FUNCTION_CALL
        call .Ldo_rop
        int3
.Ldo_rop:
        mov %rdi, (%rsp)
        ALTERNATIVE __stringify(RET), \
                    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
                    X86_FEATURE_CALL_DEPTH
SYM_CODE_END(return_to_handler)
#endif