]> rtime.felk.cvut.cz Git - can-eth-gw-linux.git/blob - arch/s390/kvm/kvm-s390.c
Merge tag 'boards' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
[can-eth-gw-linux.git] / arch / s390 / kvm / kvm-s390.c
1 /*
2  * hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  */
15
16 #include <linux/compiler.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
29 #include <asm/nmi.h>
30 #include <asm/switch_to.h>
31 #include <asm/sclp.h>
32 #include "kvm-s390.h"
33 #include "gaccess.h"
34
35 #define CREATE_TRACE_POINTS
36 #include "trace.h"
37 #include "trace-s390.h"
38
/*
 * VCPU_STAT(x) expands to the (offset, type) pair that
 * struct kvm_stats_debugfs_item expects: the byte offset of stat.x
 * inside struct kvm_vcpu plus the KVM_STAT_VCPU discriminator.
 */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Per-vcpu event counters exported by common KVM code under debugfs. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }	/* sentinel: marks the end of the table */
};
85
/*
 * Facility list handed to guests; kvm_arch_vcpu_setup() points each
 * sie block's "fac" field at this buffer.  NOTE(review): it is
 * initialised outside this chunk, presumably at module init -- confirm.
 */
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* SIE is always available on s390, so there is nothing to enable. */
	return 0;
}
94
void kvm_arch_hardware_disable(void *garbage)
{
	/* Nothing was enabled per cpu, so nothing needs disabling. */
}
98
int kvm_arch_hardware_setup(void)
{
	/* No global hardware state to configure on s390. */
	return 0;
}
103
void kvm_arch_hardware_unsetup(void)
{
	/* Counterpart of kvm_arch_hardware_setup(): nothing to undo. */
}
107
void kvm_arch_check_processor_compat(void *rtn)
{
	/* Every processor this kernel runs on can also run KVM guests. */
}
111
int kvm_arch_init(void *opaque)
{
	/* No module-wide architecture initialisation is required. */
	return 0;
}
116
void kvm_arch_exit(void)
{
	/* Counterpart of kvm_arch_init(): nothing to release. */
}
120
121 /* Section: device related */
122 long kvm_arch_dev_ioctl(struct file *filp,
123                         unsigned int ioctl, unsigned long arg)
124 {
125         if (ioctl == KVM_S390_ENABLE_SIE)
126                 return s390_enable_sie();
127         return -EINVAL;
128 }
129
130 int kvm_dev_ioctl_check_extension(long ext)
131 {
132         int r;
133
134         switch (ext) {
135         case KVM_CAP_S390_PSW:
136         case KVM_CAP_S390_GMAP:
137         case KVM_CAP_SYNC_MMU:
138 #ifdef CONFIG_KVM_S390_UCONTROL
139         case KVM_CAP_S390_UCONTROL:
140 #endif
141         case KVM_CAP_SYNC_REGS:
142         case KVM_CAP_ONE_REG:
143                 r = 1;
144                 break;
145         case KVM_CAP_NR_VCPUS:
146         case KVM_CAP_MAX_VCPUS:
147                 r = KVM_MAX_VCPUS;
148                 break;
149         case KVM_CAP_S390_COW:
150                 r = sclp_get_fac85() & 0x2;
151                 break;
152         default:
153                 r = 0;
154         }
155         return r;
156 }
157
158 /* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390; report success without
 * setting any bits.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
167
168 long kvm_arch_vm_ioctl(struct file *filp,
169                        unsigned int ioctl, unsigned long arg)
170 {
171         struct kvm *kvm = filp->private_data;
172         void __user *argp = (void __user *)arg;
173         int r;
174
175         switch (ioctl) {
176         case KVM_S390_INTERRUPT: {
177                 struct kvm_s390_interrupt s390int;
178
179                 r = -EFAULT;
180                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
181                         break;
182                 r = kvm_s390_inject_vm(kvm, &s390int);
183                 break;
184         }
185         default:
186                 r = -ENOTTY;
187         }
188
189         return r;
190 }
191
192 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
193 {
194         int rc;
195         char debug_name[16];
196
197         rc = -EINVAL;
198 #ifdef CONFIG_KVM_S390_UCONTROL
199         if (type & ~KVM_VM_S390_UCONTROL)
200                 goto out_err;
201         if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
202                 goto out_err;
203 #else
204         if (type)
205                 goto out_err;
206 #endif
207
208         rc = s390_enable_sie();
209         if (rc)
210                 goto out_err;
211
212         rc = -ENOMEM;
213
214         kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
215         if (!kvm->arch.sca)
216                 goto out_err;
217
218         sprintf(debug_name, "kvm-%u", current->pid);
219
220         kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
221         if (!kvm->arch.dbf)
222                 goto out_nodbf;
223
224         spin_lock_init(&kvm->arch.float_int.lock);
225         INIT_LIST_HEAD(&kvm->arch.float_int.list);
226
227         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
228         VM_EVENT(kvm, 3, "%s", "vm created");
229
230         if (type & KVM_VM_S390_UCONTROL) {
231                 kvm->arch.gmap = NULL;
232         } else {
233                 kvm->arch.gmap = gmap_alloc(current->mm);
234                 if (!kvm->arch.gmap)
235                         goto out_nogmap;
236         }
237         return 0;
238 out_nogmap:
239         debug_unregister(kvm->arch.dbf);
240 out_nodbf:
241         free_page((unsigned long)(kvm->arch.sca));
242 out_err:
243         return rc;
244 }
245
/*
 * Free one vcpu: unhook it from the SCA (non-ucontrol VMs), release
 * its private gmap (ucontrol VMs), then free the sie block and the
 * vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this cpu's bit in the SCA mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and unhook the SDA if it still points at our block */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before the sie block is freed */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
266
/*
 * Destroy all vcpus of a VM, then clear the vcpu table and the online
 * count under kvm->lock so concurrent readers see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
282
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* No asynchronous arch events to flush on s390. */
}
286
/*
 * Release everything kvm_arch_init_vm() and vcpu creation set up:
 * vcpus first (they reference the SCA), then the SCA page, the debug
 * feature and finally the VM-wide gmap (which ucontrol VMs never had).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
295
296 /* Section: vcpu related */
297 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
298 {
299         if (kvm_is_ucontrol(vcpu->kvm)) {
300                 vcpu->arch.gmap = gmap_alloc(current->mm);
301                 if (!vcpu->arch.gmap)
302                         return -ENOMEM;
303                 return 0;
304         }
305
306         vcpu->arch.gmap = vcpu->kvm->arch.gmap;
307         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
308                                     KVM_SYNC_GPRS |
309                                     KVM_SYNC_ACRS |
310                                     KVM_SYNC_CRS;
311         return 0;
312 }
313
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do here. */
}
318
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host's
 * FP and access registers, load the guest's, switch to the guest
 * address space and mark the vcpu running.  Mirrored by
 * kvm_arch_vcpu_put().
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip invalid bits before loading the guest fp control */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
329
/*
 * Called when this vcpu is scheduled off a host cpu: exact reverse of
 * kvm_arch_vcpu_load() -- clear the running flag, leave the guest
 * address space, save the guest registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
339
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	/* zero all 16 control registers, then apply architected defaults */
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the cleared fp control value into the hardware register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
357
/*
 * One-time setup of the sie control block and the clock comparator
 * machinery for a freshly created vcpu.  Always succeeds.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	/* point the sie block at the module-wide facility list */
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	/* ckc wakeups: hrtimer fires, tasklet does the actual wakeup */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
374
375 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
376                                       unsigned int id)
377 {
378         struct kvm_vcpu *vcpu;
379         int rc = -EINVAL;
380
381         if (id >= KVM_MAX_VCPUS)
382                 goto out;
383
384         rc = -ENOMEM;
385
386         vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
387         if (!vcpu)
388                 goto out;
389
390         vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
391                                         get_zeroed_page(GFP_KERNEL);
392
393         if (!vcpu->arch.sie_block)
394                 goto out_free_cpu;
395
396         vcpu->arch.sie_block->icpua = id;
397         if (!kvm_is_ucontrol(kvm)) {
398                 if (!kvm->arch.sca) {
399                         WARN_ON_ONCE(1);
400                         goto out_free_cpu;
401                 }
402                 if (!kvm->arch.sca->cpu[id].sda)
403                         kvm->arch.sca->cpu[id].sda =
404                                 (__u64) vcpu->arch.sie_block;
405                 vcpu->arch.sie_block->scaoh =
406                         (__u32)(((__u64)kvm->arch.sca) >> 32);
407                 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
408                 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
409         }
410
411         spin_lock_init(&vcpu->arch.local_int.lock);
412         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
413         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
414         spin_lock(&kvm->arch.float_int.lock);
415         kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
416         init_waitqueue_head(&vcpu->arch.local_int.wq);
417         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
418         spin_unlock(&kvm->arch.float_int.lock);
419
420         rc = kvm_vcpu_init(vcpu, kvm, id);
421         if (rc)
422                 goto out_free_sie_block;
423         VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
424                  vcpu->arch.sie_block);
425         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
426
427         return vcpu;
428 out_free_sie_block:
429         free_page((unsigned long)(vcpu->arch.sie_block));
430 out_free_cpu:
431         kfree(vcpu);
432 out:
433         return ERR_PTR(rc);
434 }
435
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
442
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
449
450 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
451                                            struct kvm_one_reg *reg)
452 {
453         int r = -EINVAL;
454
455         switch (reg->id) {
456         case KVM_REG_S390_TODPR:
457                 r = put_user(vcpu->arch.sie_block->todpr,
458                              (u32 __user *)reg->addr);
459                 break;
460         case KVM_REG_S390_EPOCHDIFF:
461                 r = put_user(vcpu->arch.sie_block->epoch,
462                              (u64 __user *)reg->addr);
463                 break;
464         case KVM_REG_S390_CPU_TIMER:
465                 r = put_user(vcpu->arch.sie_block->cputm,
466                              (u64 __user *)reg->addr);
467                 break;
468         case KVM_REG_S390_CLOCK_COMP:
469                 r = put_user(vcpu->arch.sie_block->ckc,
470                              (u64 __user *)reg->addr);
471                 break;
472         default:
473                 break;
474         }
475
476         return r;
477 }
478
479 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
480                                            struct kvm_one_reg *reg)
481 {
482         int r = -EINVAL;
483
484         switch (reg->id) {
485         case KVM_REG_S390_TODPR:
486                 r = get_user(vcpu->arch.sie_block->todpr,
487                              (u32 __user *)reg->addr);
488                 break;
489         case KVM_REG_S390_EPOCHDIFF:
490                 r = get_user(vcpu->arch.sie_block->epoch,
491                              (u64 __user *)reg->addr);
492                 break;
493         case KVM_REG_S390_CPU_TIMER:
494                 r = get_user(vcpu->arch.sie_block->cputm,
495                              (u64 __user *)reg->addr);
496                 break;
497         case KVM_REG_S390_CLOCK_COMP:
498                 r = get_user(vcpu->arch.sie_block->ckc,
499                              (u64 __user *)reg->addr);
500                 break;
501         default:
502                 break;
503         }
504
505         return r;
506 }
507
/* Thin ioctl wrapper around the initial-cpu-reset helper; never fails. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
513
514 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
515 {
516         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
517         return 0;
518 }
519
520 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
521 {
522         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
523         return 0;
524 }
525
526 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
527                                   struct kvm_sregs *sregs)
528 {
529         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
530         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
531         restore_access_regs(vcpu->run->s.regs.acrs);
532         return 0;
533 }
534
535 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
536                                   struct kvm_sregs *sregs)
537 {
538         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
539         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
540         return 0;
541 }
542
543 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
544 {
545         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
546         vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
547         restore_fp_regs(&vcpu->arch.guest_fpregs);
548         return 0;
549 }
550
551 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
552 {
553         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
554         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
555         return 0;
556 }
557
558 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
559 {
560         int rc = 0;
561
562         if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
563                 rc = -EBUSY;
564         else {
565                 vcpu->run->psw_mask = psw.mask;
566                 vcpu->run->psw_addr = psw.addr;
567         }
568         return rc;
569 }
570
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
576
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
582
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
588
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
594
/*
 * Enter the guest once via the SIE instruction.  Returns 0 when the
 * guest intercepted normally, SIE_INTERCEPT_UCONTROL when a ucontrol
 * fault must be handled by user space.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 shadow guest gprs 14 and 15 (16 bytes) in the sie block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	/* handle any pending machine check before entering the guest */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	kvm_guest_enter();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			/* user space resolves the fault itself */
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	/* copy the shadowed gprs 14/15 back for the next iteration */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
635
/*
 * Main KVM_RUN loop: sync user-visible register state into the sie
 * block, run the guest until an intercept needs user space or a signal
 * arrives, then translate the stop reason into a kvm_run exit and sync
 * the registers back out.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only exit reasons this function can produce may re-enter here */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* transfer the PSW and any dirty sync registers into the sie block */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	/* run until an intercept needs user space, an error, or a signal */
	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		/* report the translation fault to user space */
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync the PSW, prefix and control registers back to user space */
	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
728
729 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
730                        unsigned long n, int prefix)
731 {
732         if (prefix)
733                 return copy_to_guest(vcpu, guestdest, from, n);
734         else
735                 return copy_to_guest_absolute(vcpu, guestdest, from, n);
736 }
737
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;	/* byte stored at guest address 163 */
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* absolute save area at SAVE_AREA_BASE */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		/* prefix-relative save area at SAVE_AREA_BASE */
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/* store each register group into its struct save_area slot */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
805
/*
 * vcpu-level ioctls: interrupt injection, store status, initial PSW /
 * reset, one-reg access, ucontrol address space mapping and fault
 * resolution.  Unknown ioctls return -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* inject an interrupt into this vcpu */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address (or a special-case token) */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* map a user address range into the guest address space */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* undo a previous KVM_S390_UCAS_MAP */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address fault; arg is the address */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
897
898 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
899 {
900 #ifdef CONFIG_KVM_S390_UCONTROL
901         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
902                  && (kvm_is_ucontrol(vcpu->kvm))) {
903                 vmf->page = virt_to_page(vcpu->arch.sie_block);
904                 get_page(vmf->page);
905                 return 0;
906         }
907 #endif
908         return VM_FAULT_SIGBUS;
909 }
910
/*
 * No per-memslot arch data is allocated on s390 (kvm_arch_create_memslot
 * below allocates nothing), so there is nothing to free here.
 */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}
915
/* s390 keeps no arch-specific per-memslot state; creation always succeeds. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}
920
921 /* Section: memory related */
922 int kvm_arch_prepare_memory_region(struct kvm *kvm,
923                                    struct kvm_memory_slot *memslot,
924                                    struct kvm_memory_slot old,
925                                    struct kvm_userspace_memory_region *mem,
926                                    int user_alloc)
927 {
928         /* A few sanity checks. We can have exactly one memory slot which has
929            to start at guest virtual zero and which has to be located at a
930            page boundary in userland and which has to end at a page boundary.
931            The memory in userland is ok to be fragmented into various different
932            vmas. It is okay to mmap() and munmap() stuff in this slot after
933            doing this call at any time */
934
935         if (mem->slot)
936                 return -EINVAL;
937
938         if (mem->guest_phys_addr)
939                 return -EINVAL;
940
941         if (mem->userspace_addr & 0xffffful)
942                 return -EINVAL;
943
944         if (mem->memory_size & 0xffffful)
945                 return -EINVAL;
946
947         if (!user_alloc)
948                 return -EINVAL;
949
950         return 0;
951 }
952
953 void kvm_arch_commit_memory_region(struct kvm *kvm,
954                                 struct kvm_userspace_memory_region *mem,
955                                 struct kvm_memory_slot old,
956                                 int user_alloc)
957 {
958         int rc;
959
960
961         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
962                 mem->guest_phys_addr, mem->memory_size);
963         if (rc)
964                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
965         return;
966 }
967
/* Nothing to do on s390 — no arch state to flush here. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
971
/* Nothing to do on s390 — no arch state to flush per memslot. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}
976
977 static int __init kvm_s390_init(void)
978 {
979         int ret;
980         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
981         if (ret)
982                 return ret;
983
984         /*
985          * guests can ask for up to 255+1 double words, we need a full page
986          * to hold the maximum amount of facilities. On the other hand, we
987          * only set facilities that are known to work in KVM.
988          */
989         facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
990         if (!facilities) {
991                 kvm_exit();
992                 return -ENOMEM;
993         }
994         memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
995         facilities[0] &= 0xff00fff3f47c0000ULL;
996         facilities[1] &= 0x201c000000000000ULL;
997         return 0;
998 }
999
/*
 * Module unload: release the facility-bit page allocated in kvm_s390_init()
 * and unregister from the KVM core.
 */
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}
1005
/* Register the module's entry and exit points with the module loader. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);