// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>

static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}
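/*
 * For reference, the TEXASR value built above decomposes as follows:
 * the software failure code passed in (e.g. from a treclaim. RA
 * operand) lands in the top byte, hence failure_cause << 56;
 * TEXASR_ABORT, TEXASR_FS and TEXASR_EXACT mark a recorded abort whose
 * TFIAR is exact; TEXASR_SUSP and TEXASR_PR record whether the failure
 * occurred in suspended state and/or problem state.  Only the low
 * 26 bits of the previous TEXASR (the ROT and TL fields) survive, via
 * the 0x3ffffff mask; everything else is overwritten.
 */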
/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated. The
 * instruction image is in vcpu->arch.emul_inst. If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct. The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * The TM softpatch interrupt sets NIP to the instruction following
	 * the faulting instruction, which is not executed. Rewind nip to the
	 * faulting instruction so it looks like a normal synchronous
	 * interrupt, then update nip in the places where the instruction is
	 * emulated.
	 */
	vcpu->arch.regs.nip -= 4;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
	 * in these instructions, so masking bit 31 out doesn't change these
	 * instructions. For treclaim., tsr., and trechkpt. instructions if bit
	 * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
	 * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
	 * 31 is an acceptable way to handle these invalid forms that have
	 * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
	 * bit 31 set) can generate a softpatch interrupt. Hence both forms
	 * are handled below for these instructions so they behave the same way.
	 */
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;
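	/*
	 * The tsr., treclaim. and trechkpt. cases below all record the
	 * pre-instruction transaction state in CR0 as 0b0TS0: MSR[TS]
	 * (00 = non-transactional, 01 = suspended, 10 = transactional)
	 * is shifted down by MSR_TS_S_LG and back up by 29, which places
	 * it in bits 1:2 of the CR0 field while leaving bits 0 and 3
	 * zero, after the old CR0 is cleared with the 0x0fffffff mask.
	 */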
	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}

		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n",
			    instr);
	return RESUME_GUEST;
}