/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_VMCS_H
#define __KVM_X86_VMX_VMCS_H

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/nospec.h>

#include <asm/kvm.h>
#include <asm/vmx.h>

#include "capabilities.h"

struct vmcs_hdr {
	u32 revision_id:31;
	u32 shadow_vmcs:1;
};

struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort;
	char data[0];
};

DECLARE_PER_CPU(struct vmcs *, current_vmcs);

/*
 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
 * and whose values change infrequently, but are not constant.  I.e. this is
 * used as a write-through cache of the corresponding VMCS fields.
 */
struct vmcs_host_state {
	unsigned long cr3;	/* May not match real cr3 */
	unsigned long cr4;	/* May not match real cr4 */
	unsigned long gs_base;
	unsigned long fs_base;
	unsigned long rsp;

	u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
	u16           ds_sel, es_sel;
#endif
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;
	int cpu;
	bool launched;
	bool nmi_known_unmasked;
	bool hv_timer_armed;
	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	unsigned long *msr_bitmap;
	struct list_head loaded_vmcss_on_cpu_link;
	struct vmcs_host_state host_state;
};

static inline bool is_exception_n(u32 intr_info, u8 vector)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
}

static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}

static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}

static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}

static inline bool is_gp_fault(u32 intr_info)
{
	return is_exception_n(intr_info, GP_VECTOR);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
}

static inline bool is_nmi(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}

enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};

static inline int vmcs_field_width(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_WIDTH_U32;
	return (field >> 13) & 0x3;
}

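/*
 * Note: VMCS field encodings follow the layout in the Intel SDM, Vol. 3,
 * Appendix B: bit 0 selects the high 32 bits of a 64-bit field, bits 14:13
 * encode the width, and bits 11:10 encode the access type (0 = control,
 * 1 = read-only VM-exit information, 2 = guest state, 3 = host state).
 * For example, GUEST_RIP (0x681e) has bits 14:13 == 3 and bits 11:10 == 2,
 * i.e. a natural-width guest-state field, so vmcs_field_width() returns
 * VMCS_FIELD_WIDTH_NATURAL_WIDTH and vmcs_field_readonly() returns false.
 */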
static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}

#endif /* __KVM_X86_VMX_VMCS_H */