/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "opcode.h"
#include "interrupt.h"

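/*
 * Translate a guest virtual address to a guest physical address. Only the
 * unmapped segments (CKSEG0/CKSEG1) can be translated directly; anything
 * else is unexpected here and is reported as KVM_INVALID_ADDR.
 */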
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        uint32_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}

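/*
 * Coprocessor Unusable exception. CP1 (FPU) accesses have an FPU exception
 * delivered to the guest; any other coprocessor access (typically a
 * privileged CP0 instruction) is emulated on the guest's behalf.
 */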
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
                er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
        else
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

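/*
 * TLB Modified exception: the guest wrote to a page that the host TLB maps
 * read-only. Guest user and KSEG23 addresses are handled by the TLB
 * modification emulation; faults on other addresses are unexpected and exit
 * to userspace with an internal error.
 */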
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel
                 */
                kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

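/*
 * TLB miss on a store. Commpage accesses from guest kernel mode are mapped
 * directly, guest user and KSEG23 addresses go through the generic TLB miss
 * emulation, and KSEG0 faults are filled in by KVM itself.
 */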
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

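/*
 * TLB miss on a load or instruction fetch. Handled like store misses:
 * commpage and KSEG0 faults are resolved by KVM, guest user and KSEG23
 * faults go through the generic TLB miss emulation.
 */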
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
                          vcpu->arch.pc, badvaddr);

                /*
                 * User Address (UA) fault. This can happen if:
                 * (1) the TLB entry is not present/valid in both the guest and
                 *     the shadow host TLBs; in that case the fault is passed
                 *     on to the guest kernel, which handles it.
                 * (2) the TLB entry is present in the guest TLB but not in the
                 *     shadow host TLB; in that case the entry is injected from
                 *     the guest TLB into the shadow host TLB.
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

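/*
 * Address error on a store. In guest kernel mode a store to CKSEG0/CKSEG1 is
 * treated as an access to MMIO space and emulated; anything else is an
 * internal error.
 */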
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                kvm_debug("Emulate Store to MMIO space\n");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

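/*
 * Address error on a load. Loads from CKSEG0/CKSEG1 are treated as accesses
 * to MMIO space and emulated; anything else is an internal error.
 */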
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                er = EMULATE_FAIL;
        }
        return ret;
}

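/* Deliver SYSCALL exceptions straight to the guest kernel. */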
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

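/* Reserved Instruction exception: let the emulator try to handle it. */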
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

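/* BREAK instruction: deliver a breakpoint exception to the guest. */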
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

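/* Nothing extra to set up per VM or per VCPU for trap-and-emulate. */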
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

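/*
 * Initialise the guest CP0 state so the guest sees a MIPS 24Kc class CPU
 * with a software-managed TLB of KVM_MIPS_GUEST_TLB_SIZE entries.
 */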
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected; for now we simulate a MIPS 24Kc.
         */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
        kvm_write_c0_guest_config(cop0,
                                  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
                                  (MMU_TYPE_R4000 << CP0C0_MT));

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &=
            ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
              (1 << CP0C1_WR) | (1 << CP0C1_CA));
        kvm_write_c0_guest_config1(cop0, config1);

        kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
        /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
        kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
                                         (1 << CP0C3_ULRI));

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /*
         * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

        return 0;
}

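/*
 * Read one of the registers that needs special handling here (the emulated
 * CP0_Count and the timer control registers); other registers are handled by
 * the common MIPS KVM code.
 */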
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

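/*
 * Write one of the specially handled registers, keeping the emulated
 * count/compare timer consistent with the new value (in particular with the
 * DC bit in CP0_Cause).
 */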
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

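/*
 * Callbacks installed into the common MIPS KVM code to implement
 * trap-and-emulate virtualization.
 */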
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
};

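/* Hand the trap-and-emulate callbacks back to the common MIPS KVM code. */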
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}