/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_RESCTRL_H
#define _ASM_X86_RESCTRL_H

#ifdef CONFIG_X86_CPU_RESCTRL

#include <linux/sched.h>
#include <linux/jump_label.h>

/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:		The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain the CLOSID and the
 * lower 10 bits the RMID. A write to MSR_IA32_PQR_ASSOC always updates
 * both parts, so both values need to be cached. This structure also
 * stores the user configured per-CPU CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates if the value does
 * not change.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};

DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);

DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

/*
 * __resctrl_sched_in() - Writes the task's CLOSID/RMID to MSR_IA32_PQR_ASSOC
 *
 * The following considerations keep the impact on the scheduler hot path
 * minimal:
 * - This stays a no-op unless we are running on an Intel SKU which
 *   supports resource control or monitoring, and the resctrl file
 *   system has been mounted.
 * - The per-CPU CLOSID/RMID values are cached and the MSR write happens
 *   only when a task with a different CLOSID/RMID is scheduled in.
 * - RMIDs/CLOSIDs are allocated globally to keep this as simple as
 *   possible.
 *
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = state->default_closid;
	u32 rmid = state->default_rmid;
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}

static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	val /= scale;
	return val * scale;
}

static inline void resctrl_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}

void resctrl_cpu_detect(struct cpuinfo_x86 *c);

#else

static inline void resctrl_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */
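
/*
 * Usage sketch (illustrative only, not part of this header): in the
 * kernel, resctrl_sched_in() is called from the x86 context-switch
 * path with the incoming task. The caller below is a simplified,
 * hypothetical stand-in that shows where the hook sits and why the
 * "preemption disabled" requirement documented above is naturally
 * satisfied there.
 */
#if 0	/* example only */
static struct task_struct *
example_switch_to(struct task_struct *prev, struct task_struct *next)
{
	/* ... architectural register/FPU state switching elided ... */

	/*
	 * A context switch runs with preemption disabled, which satisfies
	 * the requirement of __resctrl_sched_in(). The rdt_enable_key
	 * static branch keeps this call a no-op until resctrl is in use,
	 * and the per-CPU cache in pqr_state avoids the MSR write when
	 * the incoming task resolves to the same CLOSID/RMID.
	 */
	resctrl_sched_in(next);

	return prev;
}
#endif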