// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f
#define INSN_MATCH_FLW		0x2007
#define INSN_MASK_FLW		0x707f
#define INSN_MATCH_FLD		0x3007
#define INSN_MASK_FLD		0x707f
#define INSN_MATCH_FLQ		0x4007
#define INSN_MASK_FLQ		0x707f
#define INSN_MATCH_FSW		0x2027
#define INSN_MASK_FSW		0x707f
#define INSN_MATCH_FSD		0x3027
#define INSN_MASK_FSD		0x707f
#define INSN_MATCH_FSQ		0x4027
#define INSN_MASK_FSQ		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_MATCH_C_FLD	0x2000
#define INSN_MASK_C_FLD		0xe003
#define INSN_MATCH_C_FLW	0x6000
#define INSN_MASK_C_FLW		0xe003
#define INSN_MATCH_C_FSD	0xa000
#define INSN_MASK_C_FSD		0xe003
#define INSN_MATCH_C_FSW	0xe000
#define INSN_MASK_C_FSW		0xe003
#define INSN_MATCH_C_FLDSP	0x2002
#define INSN_MASK_C_FLDSP	0xe003
#define INSN_MATCH_C_FSDSP	0xa002
#define INSN_MASK_C_FSDSP	0xe003
#define INSN_MATCH_C_FLWSP	0x6002
#define INSN_MASK_C_FLWSP	0xe003
#define INSN_MATCH_C_FSWSP	0xe002
#define INSN_MASK_C_FSWSP	0xe003

#define INSN_LEN(insn)		((((insn) & 0x3) < 0x3) ? 2 : 4)

#if defined(CONFIG_64BIT)
#define LOG_REGBYTES		3
#define XLEN			64
#else
#define LOG_REGBYTES		2
#define XLEN			32
#endif
#define REGBYTES		(1 << LOG_REGBYTES)
#define XLEN_MINUS_16		((XLEN) - 16)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
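/*
 * Immediate-decoding helpers for compressed (RVC) loads and stores, whose
 * offset bits are scattered across the encoding. For example, C.LW keeps
 * imm[2] in insn[6], imm[5:3] in insn[12:10] and imm[6] in insn[5];
 * RVC_LW_IMM() below stitches these back into a plain byte offset.
 * RVC_RS1S()/RVC_RS2S() map the 3-bit rs1'/rs2' fields onto x8-x15.
 */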
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

#define SHIFT_RIGHT(x, y)	\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK		\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)	\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))

#define GET_RM(insn)		(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3		0x7000

#define GET_PRECISION(insn)	(((insn) >> 25) & 3)
#define PRECISION_S		0
#define PRECISION_D		1
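/*
 * With CONFIG_FPU, the helpers below move values between the emulation code
 * and the FP register file through the assembly accessors (put_f32_reg() and
 * friends) and mark the FP state dirty so it is saved on the next context
 * switch. Without CONFIG_FPU they are empty stubs; the handlers bail out
 * with -EOPNOTSUPP before relying on them.
 */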
#ifdef CONFIG_FPU

#define FP_GET_RD(insn)		(insn >> 7 & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
		      unsigned long val)
{
	unsigned long fp_reg = FP_GET_RD(insn);

	put_f32_reg(fp_reg, val);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
	unsigned long fp_reg = FP_GET_RD(insn);
	unsigned long value;

#if __riscv_xlen == 32
	value = (unsigned long) &val;
#else
	value = val;
#endif
	put_f64_reg(fp_reg, value);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
		      struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	u64 val;

	get_f64_reg(fp_reg, &val);
	regs->status |= SR_FS_DIRTY;

	return val;
}
#else

extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f64_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#endif

extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f32_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
		       unsigned long val) {}

static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

#endif

#define GET_F64_RS2(insn, regs)		(get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs)	(get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs)	(get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs)		(get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs)	(get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs)	(get_f32_rs(RVC_RS2S(insn), 0, regs))

#ifdef CONFIG_RISCV_M_MODE
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	u8 val;

	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
	*r_val = val;

	return 0;
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));

	return 0;
}
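/*
 * M-mode instruction fetch: when mepc is only 2-byte aligned, a 32-bit load
 * would cross the next word boundary, which the (possibly 16-bit)
 * instruction itself may not, so read one halfword first and fetch the upper
 * parcel only if the opcode says the instruction is 32 bits wide. The
 * sll/srl pair by XLEN - 16 clears the upper bits when a compressed
 * instruction was loaded with a full-word access.
 */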
static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
{
	register ulong __mepc asm ("a2") = mepc;
	ulong val, rvc_mask = 3, tmp;

	asm ("and %[tmp], %[addr], 2\n"
		"bnez %[tmp], 1f\n"
#if defined(CONFIG_64BIT)
		__stringify(LWU) " %[insn], (%[addr])\n"
#else
		__stringify(LW) " %[insn], (%[addr])\n"
#endif
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"beq %[tmp], %[rvc_mask], 2f\n"
		"sll %[insn], %[insn], %[xlen_minus_16]\n"
		"srl %[insn], %[insn], %[xlen_minus_16]\n"
		"j 2f\n"
		"1:\n"
		"lhu %[insn], (%[addr])\n"
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"bne %[tmp], %[rvc_mask], 2f\n"
		"lhu %[tmp], 2(%[addr])\n"
		"sll %[tmp], %[tmp], 16\n"
		"add %[insn], %[insn], %[tmp]\n"
		"2:"
	: [insn] "=&r" (val), [tmp] "=&r" (tmp)
	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
	  [xlen_minus_16] "i" (XLEN_MINUS_16));

	*r_insn = val;

	return 0;
}
#else
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	if (user_mode(regs)) {
		return __get_user(*r_val, (u8 __user *)addr);
	} else {
		*r_val = *addr;
		return 0;
	}
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	if (user_mode(regs)) {
		return __put_user(val, (u8 __user *)addr);
	} else {
		*addr = val;
		return 0;
	}
}

#define __read_insn(regs, insn, insn_addr)		\
({							\
	int __ret;					\
							\
	if (user_mode(regs)) {				\
		__ret = __get_user(insn, insn_addr);	\
	} else {					\
		insn = *(__force u16 *)insn_addr;	\
		__ret = 0;				\
	}						\
							\
	__ret;						\
})

static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
	ulong insn = 0;

	if (epc & 0x2) {
		ulong tmp = 0;
		u16 __user *insn_addr = (u16 __user *)epc;

		if (__read_insn(regs, insn, insn_addr))
			return -EFAULT;
		/*
		 * __get_user() uses a regular "lw", which sign-extends the
		 * loaded value; clear the upper bits so that OR-ing in the
		 * upper 16-bit half below works as intended.
		 */
		insn &= GENMASK(15, 0);
		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn_addr++;
		if (__read_insn(regs, tmp, insn_addr))
			return -EFAULT;
		*r_insn = (tmp << 16) | insn;

		return 0;
	} else {
		u32 __user *insn_addr = (u32 __user *)epc;

		if (__read_insn(regs, insn, insn_addr))
			return -EFAULT;
		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn &= GENMASK(15, 0);
		*r_insn = insn;

		return 0;
	}
}
#endif

union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};

static bool unaligned_ctl __read_mostly;

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
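/*
 * Emulate a misaligned load: decode the faulting instruction, assemble the
 * value one byte at a time with load_u8() (which goes through __get_user()
 * for user-mode faults), write it to the destination register and step epc
 * past the instruction. Compressed encodings are canonicalised by shifting
 * the decoded rd'/rs2' field into the regular rd position so that SET_RD()
 * can be reused.
 */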
int handle_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
		fp = 1;
		len = 8;
	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
		fp = 1;
		len = 4;
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
		fp = 1;
		len = 8;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
		fp = 1;
		len = 8;
#if defined(CONFIG_32BIT)
	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
		fp = 1;
		len = 4;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
		fp = 1;
		len = 4;
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	val.data_u64 = 0;
	for (i = 0; i < len; i++) {
		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
			return -1;
	}

	/*
	 * The double shift must be done on a signed type to sign-extend
	 * LH/LW results; an unsigned right shift would zero-extend them.
	 * The unsigned variants (LHU/LWU) decode with shift == 0 above.
	 */
	if (!fp)
		SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
	else if (len == 8)
		set_f64_rd(insn, regs, val.data_u64);
	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
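/*
 * Emulate a misaligned store: the mirror image of the load path. The source
 * register is read up front (GET_RS2() or one of the FP variants), then the
 * bytes are written out one at a time with store_u8(), which uses
 * __put_user() when the fault came from user mode.
 */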
int handle_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	val.data_ulong = GET_RS2(insn, regs);

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2(insn, regs);
	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2(insn, regs);
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
		len = 8;
		val.data_ulong = GET_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	for (i = 0; i < len; i++) {
		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
			return -1;
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

bool check_unaligned_access_emulated(int cpu)
{
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;
	bool misaligned_emu_detected;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	__asm__ __volatile__ (
		"       "REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
	/*
	 * If unaligned_ctl is already set, this means that we detected that
	 * all CPUs use emulated misaligned accesses at boot time. If that
	 * changed when hotplugging the new CPU, this is something we don't
	 * handle.
	 */
	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		while (true)
			cpu_relax();
	}

	return misaligned_emu_detected;
}

void unaligned_emulation_finish(void)
{
	int cpu;

	/*
	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
	 * accesses emulated since tasks requesting such control can run on any
	 * CPU.
	 */
	for_each_online_cpu(cpu) {
		if (per_cpu(misaligned_access_speed, cpu) !=
					RISCV_HWPROBE_MISALIGNED_EMULATED) {
			return;
		}
	}
	unaligned_ctl = true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}