]> rtime.felk.cvut.cz Git - can-eth-gw-linux.git/blob - arch/s390/kvm/kvm-s390.c
Merge tag 'asoc-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound...
[can-eth-gw-linux.git] / arch / s390 / kvm / kvm-s390.c
1 /*
2  * hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  */
15
16 #include <linux/compiler.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
29 #include <asm/nmi.h>
30 #include <asm/switch_to.h>
31 #include <asm/sclp.h>
32 #include "kvm-s390.h"
33 #include "gaccess.h"
34
35 #define CREATE_TRACE_POINTS
36 #include "trace.h"
37 #include "trace-s390.h"
38
/* Expand to the (offset, kind) pair describing one per-vcpu debugfs counter. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
40
/*
 * Statistics exported through debugfs: each entry maps a file name to the
 * offset of the matching counter inside struct kvm_vcpu::stat.
 * The table is NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
85
/* Facility list handed to the SIE control block of every vcpu (set up elsewhere). */
static unsigned long long *facilities;
87
88 /* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * s390 hardware is always capable of running guests; there is no
	 * per-cpu enable step, so simply report success.
	 */
	return 0;
}
94
void kvm_arch_hardware_disable(void *garbage)
{
	/* Counterpart to kvm_arch_hardware_enable(); no work is required. */
}
98
int kvm_arch_hardware_setup(void)
{
	/* No global hardware initialization is needed on s390. */
	return 0;
}
103
void kvm_arch_hardware_unsetup(void)
{
	/* Nothing was set up in kvm_arch_hardware_setup(), so nothing to undo. */
}
107
void kvm_arch_check_processor_compat(void *rtn)
{
	/* All s390 processors that reach this code are compatible. */
}
111
int kvm_arch_init(void *opaque)
{
	/* No architecture-wide initialization is required. */
	return 0;
}
116
void kvm_arch_exit(void)
{
	/* Counterpart to kvm_arch_init(); no work is required. */
}
120
121 /* Section: device related */
122 long kvm_arch_dev_ioctl(struct file *filp,
123                         unsigned int ioctl, unsigned long arg)
124 {
125         if (ioctl == KVM_S390_ENABLE_SIE)
126                 return s390_enable_sie();
127         return -EINVAL;
128 }
129
/*
 * Report which optional KVM capabilities this s390 implementation
 * supports. Returns 0 for unknown/unsupported extensions, 1 (or a
 * capability specific value) otherwise.
 */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	/* these capabilities are always available */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write depends on a facility bit reported by SCLP */
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
157
158 /* Section: vm related */
159 /*
160  * Get (and clear) the dirty memory log for a memory slot.
161  */
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; report success
 * without filling in the log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
167
/*
 * Handle VM-level ioctls. Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is supported; everything else is -ENOTTY.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
191
/*
 * Initialize the architecture specific parts of a new VM: validate the
 * VM type, allocate the system control area (SCA), register the s390
 * debug feature and, unless this is a user controlled VM, allocate the
 * guest address space. Errors unwind via the goto chain at the bottom.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* the ucontrol flag is the only valid type bit, and needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	/* one zeroed page holding the SCA shared by all vcpus of this VM */
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs get a per-vcpu gmap in kvm_arch_vcpu_init() */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
245
/*
 * Tear down a vcpu: detach it from the system control area (unless this
 * is a user controlled VM), free its per-vcpu gmap if it owns one, and
 * release the SIE control block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this cpu's bit in the SCA mcn mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and its SIE block descriptor, if it still points at us */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before freeing anything */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
266
/*
 * Destroy every vcpu of a VM, then clear the vcpu table and the online
 * counter under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
282
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* No pending architecture events need syncing on s390. */
}
286
/*
 * Release everything kvm_arch_init_vm() set up, in reverse order:
 * vcpus first, then the SCA page, the debug feature, and finally the
 * guest address space (which ucontrol VMs do not own at VM level).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
295
296 /* Section: vcpu related */
297 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
298 {
299         if (kvm_is_ucontrol(vcpu->kvm)) {
300                 vcpu->arch.gmap = gmap_alloc(current->mm);
301                 if (!vcpu->arch.gmap)
302                         return -ENOMEM;
303                 return 0;
304         }
305
306         vcpu->arch.gmap = vcpu->kvm->arch.gmap;
307         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
308                                     KVM_SYNC_GPRS |
309                                     KVM_SYNC_ACRS |
310                                     KVM_SYNC_CRS;
311         return 0;
312 }
313
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* No per-vcpu teardown is required here. */
}
318
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host
 * FP/access registers, install the guest ones, attach the guest address
 * space and mark the vcpu as running. Order mirrors kvm_arch_vcpu_put().
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* drop fpc bits that are not architecturally valid before loading */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
329
/*
 * Called when this vcpu is scheduled away: mark it not running, detach
 * the guest address space, save the guest FP/access registers and put
 * the host ones back — the exact reverse of kvm_arch_vcpu_load().
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
339
/*
 * Put the vcpu into the architected initial-cpu-reset state: zero PSW,
 * prefix, timers and control registers (then re-apply the architected
 * reset values for cr0/cr14), clear the FP control, and mark the vcpu
 * stopped.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the zeroed fpc into the hardware register immediately */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
357
/*
 * One-time setup of the SIE control block and the per-vcpu timer and
 * tasklet used to deliver clock-comparator wakeups.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	/* execution control bits; 6 enables specific interpretation
	 * features — see the SIE block layout for the bit meanings */
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	/* point SIE at the host facility list set up at module init */
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* advertise a distinct version to the guest */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
374
375 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
376                                       unsigned int id)
377 {
378         struct kvm_vcpu *vcpu;
379         int rc = -EINVAL;
380
381         if (id >= KVM_MAX_VCPUS)
382                 goto out;
383
384         rc = -ENOMEM;
385
386         vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
387         if (!vcpu)
388                 goto out;
389
390         vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
391                                         get_zeroed_page(GFP_KERNEL);
392
393         if (!vcpu->arch.sie_block)
394                 goto out_free_cpu;
395
396         vcpu->arch.sie_block->icpua = id;
397         if (!kvm_is_ucontrol(kvm)) {
398                 if (!kvm->arch.sca) {
399                         WARN_ON_ONCE(1);
400                         goto out_free_cpu;
401                 }
402                 if (!kvm->arch.sca->cpu[id].sda)
403                         kvm->arch.sca->cpu[id].sda =
404                                 (__u64) vcpu->arch.sie_block;
405                 vcpu->arch.sie_block->scaoh =
406                         (__u32)(((__u64)kvm->arch.sca) >> 32);
407                 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
408                 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
409         }
410
411         spin_lock_init(&vcpu->arch.local_int.lock);
412         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
413         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
414         spin_lock(&kvm->arch.float_int.lock);
415         kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
416         init_waitqueue_head(&vcpu->arch.local_int.wq);
417         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
418         spin_unlock(&kvm->arch.float_int.lock);
419
420         rc = kvm_vcpu_init(vcpu, kvm, id);
421         if (rc)
422                 goto out_free_sie_block;
423         VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
424                  vcpu->arch.sie_block);
425         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
426
427         return vcpu;
428 out_free_sie_block:
429         free_page((unsigned long)(vcpu->arch.sie_block));
430 out_free_cpu:
431         kfree(vcpu);
432 out:
433         return ERR_PTR(rc);
434 }
435
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
442
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
449
450 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
451                                            struct kvm_one_reg *reg)
452 {
453         int r = -EINVAL;
454
455         switch (reg->id) {
456         case KVM_REG_S390_TODPR:
457                 r = put_user(vcpu->arch.sie_block->todpr,
458                              (u32 __user *)reg->addr);
459                 break;
460         case KVM_REG_S390_EPOCHDIFF:
461                 r = put_user(vcpu->arch.sie_block->epoch,
462                              (u64 __user *)reg->addr);
463                 break;
464         case KVM_REG_S390_CPU_TIMER:
465                 r = put_user(vcpu->arch.sie_block->cputm,
466                              (u64 __user *)reg->addr);
467                 break;
468         case KVM_REG_S390_CLOCK_COMP:
469                 r = put_user(vcpu->arch.sie_block->ckc,
470                              (u64 __user *)reg->addr);
471                 break;
472         default:
473                 break;
474         }
475
476         return r;
477 }
478
479 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
480                                            struct kvm_one_reg *reg)
481 {
482         int r = -EINVAL;
483
484         switch (reg->id) {
485         case KVM_REG_S390_TODPR:
486                 r = get_user(vcpu->arch.sie_block->todpr,
487                              (u32 __user *)reg->addr);
488                 break;
489         case KVM_REG_S390_EPOCHDIFF:
490                 r = get_user(vcpu->arch.sie_block->epoch,
491                              (u64 __user *)reg->addr);
492                 break;
493         case KVM_REG_S390_CPU_TIMER:
494                 r = get_user(vcpu->arch.sie_block->cputm,
495                              (u64 __user *)reg->addr);
496                 break;
497         case KVM_REG_S390_CLOCK_COMP:
498                 r = get_user(vcpu->arch.sie_block->ckc,
499                              (u64 __user *)reg->addr);
500                 break;
501         default:
502                 break;
503         }
504
505         return r;
506 }
507
/* KVM_S390_INITIAL_RESET: perform the architected reset; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
513
514 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
515 {
516         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
517         return 0;
518 }
519
520 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
521 {
522         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
523         return 0;
524 }
525
/*
 * Install new access and control registers from userspace; the access
 * registers are loaded into the hardware right away so they take effect
 * before the next guest entry.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
534
535 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
536                                   struct kvm_sregs *sregs)
537 {
538         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
539         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
540         return 0;
541 }
542
/*
 * Install new floating point registers from userspace. Invalid fpc bits
 * are masked off, and the registers are loaded into the hardware right
 * away so they take effect before the next guest entry.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}
550
551 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
552 {
553         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
554         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
555         return 0;
556 }
557
558 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
559 {
560         int rc = 0;
561
562         if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
563                 rc = -EBUSY;
564         else {
565                 vcpu->run->psw_mask = psw.mask;
566                 vcpu->run->psw_addr = psw.addr;
567         }
568         return rc;
569 }
570
/* Address translation ioctl — not implemented yet on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
576
/* Guest debugging ioctl — not implemented yet on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
582
/* MP-state query — not implemented yet on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
588
/* MP-state update — not implemented yet on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
594
/*
 * One trip into the SIE: deliver pending work (reschedule, machine
 * checks, guest interrupts), enter the guest via sie64a() and translate
 * a SIE fault into either a ucontrol exit or an addressing exception.
 * Returns 0 on a normal intercept, SIE_INTERCEPT_UCONTROL for ucontrol
 * faults, or a negative error.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 in the SIE block shadow gprs 14 and 15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* tell RCU et al. we are in guest mode while irqs are off */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			/* let userspace resolve the fault */
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
639
/*
 * KVM_RUN: sync registers from the kvm_run area into the SIE block,
 * loop entering the guest until an intercept needs userspace, a signal
 * arrives or an error occurs, then translate the result into an exit
 * reason and sync registers back out.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only exit reasons we set ourselves may re-enter here */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* apply the PSW and any registers userspace marked dirty */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	/* enter the guest repeatedly until something needs attention */
	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		/* hand the faulting guest address to userspace */
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync current state back out for userspace */
	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
732
733 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
734                        unsigned long n, int prefix)
735 {
736         if (prefix)
737                 return copy_to_guest(vcpu, guestdest, from, n);
738         else
739                 return copy_to_guest_absolute(vcpu, guestdest, from, n);
740 }
741
/*
 * store status at address
 * there are two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * Each register group is copied into its save_area slot; any failing
 * copy aborts with -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag z/Architecture mode in absolute byte 163 */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same flag, but relative to the vcpu's prefix area */
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
809
/*
 * Dispatch vcpu-level ioctls: interrupt injection, store status, PSW
 * and reset handling, ONE_REG access, ucontrol address space mapping
 * and fault resolution. Unknown ioctls return -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address to store to (or a special value) */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest page fault at address arg */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
901
902 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
903 {
904 #ifdef CONFIG_KVM_S390_UCONTROL
905         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
906                  && (kvm_is_ucontrol(vcpu->kvm))) {
907                 vmf->page = virt_to_page(vcpu->arch.sie_block);
908                 get_page(vmf->page);
909                 return 0;
910         }
911 #endif
912         return VM_FAULT_SIGBUS;
913 }
914
/*
 * Arch hook for tearing down a memory slot. s390 keeps no arch-private
 * per-slot state in this version, so there is nothing to release here.
 */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	/* intentionally empty */
}
919
/*
 * Arch hook for setting up a memory slot. No arch-private per-slot
 * allocations are needed on s390, so this always succeeds.
 */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
924
925 /* Section: memory related */
926 int kvm_arch_prepare_memory_region(struct kvm *kvm,
927                                    struct kvm_memory_slot *memslot,
928                                    struct kvm_memory_slot old,
929                                    struct kvm_userspace_memory_region *mem,
930                                    int user_alloc)
931 {
932         /* A few sanity checks. We can have exactly one memory slot which has
933            to start at guest virtual zero and which has to be located at a
934            page boundary in userland and which has to end at a page boundary.
935            The memory in userland is ok to be fragmented into various different
936            vmas. It is okay to mmap() and munmap() stuff in this slot after
937            doing this call at any time */
938
939         if (mem->slot)
940                 return -EINVAL;
941
942         if (mem->guest_phys_addr)
943                 return -EINVAL;
944
945         if (mem->userspace_addr & 0xffffful)
946                 return -EINVAL;
947
948         if (mem->memory_size & 0xffffful)
949                 return -EINVAL;
950
951         if (!user_alloc)
952                 return -EINVAL;
953
954         return 0;
955 }
956
957 void kvm_arch_commit_memory_region(struct kvm *kvm,
958                                 struct kvm_userspace_memory_region *mem,
959                                 struct kvm_memory_slot old,
960                                 int user_alloc)
961 {
962         int rc;
963
964
965         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
966                 mem->guest_phys_addr, mem->memory_size);
967         if (rc)
968                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
969         return;
970 }
971
/* No shadow page tables to flush on s390 — the gmap is the only mapping. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
975
/* Per-slot shadow flush hook; nothing to do on s390 (see above). */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
980
981 static int __init kvm_s390_init(void)
982 {
983         int ret;
984         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
985         if (ret)
986                 return ret;
987
988         /*
989          * guests can ask for up to 255+1 double words, we need a full page
990          * to hold the maximum amount of facilities. On the other hand, we
991          * only set facilities that are known to work in KVM.
992          */
993         facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
994         if (!facilities) {
995                 kvm_exit();
996                 return -ENOMEM;
997         }
998         memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
999         facilities[0] &= 0xff00fff3f47c0000ULL;
1000         facilities[1] &= 0x201c000000000000ULL;
1001         return 0;
1002 }
1003
/*
 * Module exit point: release the facility page allocated in
 * kvm_s390_init() and unregister from the generic KVM core.
 */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
1009
/* Register module entry/exit hooks with the kernel. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);