// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#else
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_v01,
	&vcpu_sbi_ext_base,
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
	&vcpu_sbi_ext_srst,
	&vcpu_sbi_ext_hsm,
	&vcpu_sbi_ext_pmu,
	&vcpu_sbi_ext_experimental,
	&vcpu_sbi_ext_vendor,
};

void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i]->extid_start <= extid &&
		    sbi_ext[i]->extid_end >= extid)
			return sbi_ext[i];
	}

	return NULL;
}

int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the
	 * ioctl loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}
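
/*
 * Illustrative sketch (not part of the upstream file): what a minimal
 * kvm_vcpu_sbi_extension entry plugged into the sbi_ext[] table above could
 * look like. The extension ID and all "demo" names are hypothetical; real
 * extensions (e.g. vcpu_sbi_ext_time) live in their own vcpu_sbi_*.c files.
 */
#if 0	/* example only, not compiled */
#define SBI_EXT_DEMO	0x0A000000UL	/* hypothetical extension ID */

static int kvm_sbi_ext_demo_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	switch (cp->a6) {	/* SBI function ID */
	case 0:
		/* Success: out_val reaches guest a1, err_val (0) guest a0 */
		retdata->out_val = cp->a0 + 1;
		break;
	default:
		/* Failure: err_val is propagated to the guest in a0 */
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	/*
	 * A non-negative return with retdata->uexit left false makes
	 * kvm_riscv_vcpu_sbi_ecall() stay in the in-kernel ioctl loop.
	 */
	return 0;
}

static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_demo = {
	.extid_start = SBI_EXT_DEMO,
	.extid_end = SBI_EXT_DEMO,
	.handler = kvm_sbi_ext_demo_handler,
};
#endif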
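
/*
 * Illustrative sketch (not part of the upstream file): how a guest reaches
 * kvm_riscv_vcpu_sbi_ecall() above. Per the SBI calling convention, a7 holds
 * the extension ID, a6 the function ID and a0-a5 the arguments; the error
 * code comes back in a0 and the value in a1, exactly the registers that
 * kvm_riscv_vcpu_sbi_forward() copies into the kvm_run structure. The
 * "demo" names are hypothetical; the kernel's real wrapper is sbi_ecall()
 * in arch/riscv/kernel/sbi.c.
 */
#if 0	/* example only, not compiled */
struct sbiret_demo {
	long error;
	long value;
};

static inline struct sbiret_demo sbi_ecall_demo(unsigned long ext,
						unsigned long fid,
						unsigned long arg0)
{
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a7 asm("a7") = ext;
	register unsigned long a1 asm("a1");

	/* The ecall traps to the hypervisor, which dispatches it above */
	asm volatile("ecall"
		     : "+r" (a0), "=r" (a1)
		     : "r" (a6), "r" (a7)
		     : "memory");

	return (struct sbiret_demo){ .error = a0, .value = a1 };
}
#endif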
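
/*
 * Illustrative sketch (not part of the upstream file): the userspace side of
 * the forwarding path. When kvm_riscv_vcpu_sbi_forward() sets
 * KVM_EXIT_RISCV_SBI, a VMM can service the call, fill run->riscv_sbi.ret[]
 * and re-enter the vCPU; kvm_riscv_vcpu_sbi_return() then copies
 * ret[0]/ret[1] into guest a0/a1 and advances sepc. vcpu_fd, the mmap() of
 * the kvm_run area and all error handling are assumed to come from the VMM's
 * usual KVM setup and are elided here.
 */
#if 0	/* example only, not compiled */
	struct kvm_run *run = /* mmap()ed from vcpu_fd */;

	while (!ioctl(vcpu_fd, KVM_RUN, 0)) {
		if (run->exit_reason != KVM_EXIT_RISCV_SBI)
			continue;	/* other exit reasons elided */

		/* extension_id/function_id/args mirror guest a7/a6/a0-a5 */
		fprintf(stderr, "SBI call ext=%lx fid=%lx\n",
			run->riscv_sbi.extension_id,
			run->riscv_sbi.function_id);

		/* Whatever userspace writes here becomes guest a0/a1 */
		run->riscv_sbi.ret[0] = 0;	/* SBI_SUCCESS */
		run->riscv_sbi.ret[1] = 0;
	}
#endif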