// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc = 0;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
				   UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

		KVM_UV_EVENT(vcpu->kvm, 3,
			     "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
			     vcpu->vcpu_id, *rc, *rrc);
		WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
			  *rc, *rrc);
	}
	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}
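
/*
 * Creating a protected vCPU donates two pieces of memory to the
 * Ultravisor: the base storage allocated below and one zeroed page for
 * the Secure Instruction Data Area (SIDA).  If the Create Secure CPU
 * UV call fails, the partially set up state is rolled back through
 * kvm_s390_pv_destroy_cpu().  sdf == 2 marks the SIE control block as
 * belonging to a protected guest.
 */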
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	/*
	 * The Create Secure Configuration Ultravisor Call does not support
	 * using large pages for the virtual memory area.
	 * This is a hardware limitation.
	 */
	kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}
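
/*
 * Creating the protected configuration donates the base and variable
 * storage allocated by kvm_s390_pv_alloc_vm() to the Ultravisor.  If
 * the Create Secure Configuration UV call fails with
 * UVC_RC_NEED_DESTROY set, the Ultravisor already took ownership of
 * the donated memory, so the half-created configuration must be
 * destroyed rather than merely deallocated.
 */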
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	return 0;
}

int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}

static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}

int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}
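
/*
 * A rough sketch of the lifecycle user space drives through the
 * KVM_S390_PV_COMMAND ioctls (handled in kvm-s390.c); illustrative
 * only, with locking and error handling omitted:
 *
 *	kvm_s390_pv_init_vm()		KVM_PV_ENABLE
 *	kvm_s390_pv_create_cpu()	per vCPU, during KVM_PV_ENABLE
 *	kvm_s390_pv_set_sec_parms()	KVM_PV_SET_SEC_PARMS
 *	kvm_s390_pv_unpack()		KVM_PV_UNPACK, per image component
 *	kvm_s390_pv_set_cpu_state()	on vCPU start/stop
 *	kvm_s390_pv_destroy_cpu()	per vCPU, during KVM_PV_DISABLE
 *	kvm_s390_pv_deinit_vm()		KVM_PV_DISABLE
 */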