/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>

#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return 1;
	return 0;
}

static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (!max_longs)
		return 0;

	/*
	 * We prefer the SMCCC call, since its semantics (return actual
	 * hardware backed entropy) is closer to the idea behind this
	 * function here than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
		return 1;

	return 0;
}

static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

static inline size_t __init __must_check
arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);

	if (!max_longs)
		return 0;

	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	if (__early_cpu_has_rndr() && __arm64_rndr(v))
		return 1;

	return 0;
}
#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early

#endif /* _ASM_ARCHRANDOM_H */
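
/*
 * A minimal usage sketch, not part of the header above: one way a caller
 * could drain these interfaces into a buffer, preferring the seed-grade
 * source (SMCCC TRNG or RNDRRS) and falling back to the plain RNDR-backed
 * interface. The helper name example_fill_longs() is hypothetical and only
 * illustrates the size_t "longs filled" contract of the functions above.
 */
static inline size_t example_fill_longs(unsigned long *buf, size_t n)
{
	size_t filled = 0;

	while (filled < n) {
		/* Try the seed-grade source first. */
		size_t got = arch_get_random_seed_longs(buf + filled, n - filled);

		if (!got) {
			/* Fall back to the non-seed interface. */
			got = arch_get_random_longs(buf + filled, n - filled);
			if (!got)
				break;
		}
		filled += got;
	}

	return filled;
}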