/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
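/*
 * The routines below are patchable emulation templates.  The guest-side
 * patching code (arch/powerpc/kernel/kvm.c) is expected to copy a
 * template, fill in the *_reg / *_orig_ins / *_branch slots for the
 * privileged instruction being replaced, and redirect that instruction
 * to the copy so the guest avoids a trap to the hypervisor.  The *_offs
 * and *_len words that follow each template export the positions of
 * those slots, counted in instruction words.
 */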
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	beq	no_mtmsr

	b	do_mtmsr

no_mtmsr:

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
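/*
 * wrteei only toggles MSR_EE, so the template below never needs to
 * leave the guest; it just rewrites the MSR copy in the magic page.
 * The immediate of the ori at kvm_emulate_wrteei_ee is presumably
 * patched with the E bit (MSR_EE or 0) taken from the original wrteei
 * instruction.
 */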
.global kvm_emulate_wrteei
kvm_emulate_wrteei:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	li	r30, 0
	ori	r30, r30, MSR_EE
	andc	r31, r31, r30

	/* OR new MSR_EE onto the old MSR */
kvm_emulate_wrteei_ee:
	ori	r31, r31, 0

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_branch:
	b	.
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
	.long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
	.long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
	.long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4


.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4