/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
 * which are handled as part of the el2 return state) on every switch.
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU.  EL1 registers only need to be
 * switched when potentially going to run a different VCPU.  The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */

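/*
 * A rough sketch of where each class is handled on a VHE host, based on
 * the callers in virt/kvm/arm/arm.c and arch/arm64/kvm/hyp/switch.c
 * (illustrative ordering, not a verbatim trace):
 *
 *      kvm_arch_vcpu_load()
 *        kvm_vcpu_load_sysregs()          user + EL1 state loaded once
 *      ... run loop, per entry/exit:
 *        sysreg_save_host_state_vhe()     mdscr_el1/sp_el0
 *        sysreg_restore_guest_state_vhe()
 *        __guest_enter() ... guest exit
 *        sysreg_save_guest_state_vhe()    + el2 return state
 *        sysreg_restore_host_state_vhe()
 *      kvm_arch_vcpu_put()
 *        kvm_vcpu_put_sysregs()           user + EL1 state saved once
 */
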
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
}

static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[TPIDR_EL0]       = read_sysreg(tpidr_el0);
        ctxt->sys_regs[TPIDRRO_EL0]     = read_sysreg(tpidrro_el0);
}

static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[CSSELR_EL1]      = read_sysreg(csselr_el1);
        ctxt->sys_regs[SCTLR_EL1]       = read_sysreg_el1(sctlr);
        ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
        ctxt->sys_regs[CPACR_EL1]       = read_sysreg_el1(cpacr);
        ctxt->sys_regs[TTBR0_EL1]       = read_sysreg_el1(ttbr0);
        ctxt->sys_regs[TTBR1_EL1]       = read_sysreg_el1(ttbr1);
        ctxt->sys_regs[TCR_EL1]         = read_sysreg_el1(tcr);
        ctxt->sys_regs[ESR_EL1]         = read_sysreg_el1(esr);
        ctxt->sys_regs[AFSR0_EL1]       = read_sysreg_el1(afsr0);
        ctxt->sys_regs[AFSR1_EL1]       = read_sysreg_el1(afsr1);
        ctxt->sys_regs[FAR_EL1]         = read_sysreg_el1(far);
        ctxt->sys_regs[MAIR_EL1]        = read_sysreg_el1(mair);
        ctxt->sys_regs[VBAR_EL1]        = read_sysreg_el1(vbar);
        ctxt->sys_regs[CONTEXTIDR_EL1]  = read_sysreg_el1(contextidr);
        ctxt->sys_regs[AMAIR_EL1]       = read_sysreg_el1(amair);
        ctxt->sys_regs[CNTKCTL_EL1]     = read_sysreg_el1(cntkctl);
        ctxt->sys_regs[PAR_EL1]         = read_sysreg(par_el1);
        ctxt->sys_regs[TPIDR_EL1]       = read_sysreg(tpidr_el1);

        ctxt->gp_regs.sp_el1            = read_sysreg(sp_el1);
        ctxt->gp_regs.elr_el1           = read_sysreg_el1(elr);
        ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(spsr);
}
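
/*
 * A minimal sketch of why the EL1 accessors above come in two flavours,
 * paraphrasing the read_sysreg_elx()/write_sysreg_elx() helpers in
 * asm/kvm_hyp.h (illustrative, not the verbatim macros):
 *
 *      // Patched at boot via ALTERNATIVE(ARM64_HAS_VIRT_HOST_EXTN):
 *      //   non-VHE:  mrs %0, sctlr_el1   (hyp is at EL2, guest state at EL1)
 *      //   VHE:      mrs %0, sctlr_el12  (EL2 owns the _EL1 encodings)
 *      ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
 *
 * Registers with no _EL12 alias (e.g. par_el1, tpidr_el1, csselr_el1) are
 * never redirected by VHE and use plain read_sysreg()/write_sysreg().
 */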

static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
        ctxt->gp_regs.regs.pc           = read_sysreg_el2(elr);
        ctxt->gp_regs.regs.pstate       = read_sysreg_el2(spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
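
/*
 * Note on the RAS-conditional save above: when the RAS extension is present
 * and HCR_EL2.AMO is set (as it is while running a guest), EL1 reads of
 * DISR_EL1 are redirected by hardware to VDISR_EL2, so the guest's DISR_EL1
 * lives there.  read_sysreg_s() takes the explicit SYS_VDISR_EL2 encoding
 * because older assemblers may not know the register by name.
 */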

void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_el1_state(ctxt);
        __sysreg_save_common_state(ctxt);
        __sysreg_save_user_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}

void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);

void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
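
/*
 * A note on the differing annotations: the *_nvhe paths are __hyp_text so
 * they land in the .hyp.text section that non-VHE kernels map at EL2 (and
 * which the arm64 kprobes blacklist already excludes).  The VHE helpers are
 * ordinary kernel text, so they are blacklisted with NOKPROBE_SYMBOL()
 * instead; taking a kprobe breakpoint mid world-switch, with guest context
 * partially loaded, would be unrecoverable.
 */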

static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        write_sysreg(ctxt->gp_regs.regs.sp,       sp_el0);
}

static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[TPIDR_EL0],         tpidr_el0);
        write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],       tpidrro_el0);
}

static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MPIDR_EL1],         vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1],        csselr_el1);
        write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],     sctlr);
        write_sysreg(ctxt->sys_regs[ACTLR_EL1],         actlr_el1);
        write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],     cpacr);
        write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],     ttbr0);
        write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],     ttbr1);
        write_sysreg_el1(ctxt->sys_regs[TCR_EL1],       tcr);
        write_sysreg_el1(ctxt->sys_regs[ESR_EL1],       esr);
        write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],     afsr0);
        write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],     afsr1);
        write_sysreg_el1(ctxt->sys_regs[FAR_EL1],       far);
        write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],      mair);
        write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],      vbar);
        write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr);
        write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],     amair);
        write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],   cntkctl);
        write_sysreg(ctxt->sys_regs[PAR_EL1],           par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1],         tpidr_el1);

        write_sysreg(ctxt->gp_regs.sp_el1,              sp_el1);
        write_sysreg_el1(ctxt->gp_regs.elr_el1,         elr);
        write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
}
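
/*
 * Note on the first write above: the guest's MPIDR_EL1 is read-only and
 * cannot be written directly; it is restored into VMPIDR_EL2, whose value
 * the hardware returns for EL1 reads of MPIDR_EL1 while the guest runs.
 * There is no matching entry in __sysreg_save_el1_state() because the
 * shadow value only changes under KVM's control, via the sys_regs array.
 */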

static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
        u64 pstate = ctxt->gp_regs.regs.pstate;
        u64 mode = pstate & PSR_AA32_MODE_MASK;

        /*
         * Safety check to ensure we're setting the CPU up to enter the guest
         * in a less privileged mode.
         *
         * If we are attempting a return to EL2 or higher in AArch64 state,
         * program SPSR_EL2 with M=EL2h and the IL bit set, which ensures that
         * we'll take an illegal execution state exception immediately after
         * the ERET to the guest.  Attempts to return to AArch32 Hyp will
         * result in an illegal exception return, because EL2's execution
         * state is determined by SCR_EL3.RW.
         */
        if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
                pstate = PSR_MODE_EL2h | PSR_IL_BIT;

        write_sysreg_el2(ctxt->gp_regs.regs.pc,         elr);
        write_sysreg_el2(pstate,                        spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
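
/*
 * A worked example of the check above, using the PSR mode encodings from
 * asm/ptrace.h (PSR_MODE_EL1h = 0x5, PSR_MODE_EL2t = 0x8, PSR_MODE_EL2h =
 * 0x9, PSR_MODE32_BIT = 0x10, PSR_AA32_MODE_MASK = 0x1f):
 *
 *      pstate = 0x3c5 (EL1h, DAIF masked): mode = 0x5 < 0x8, used as-is.
 *      pstate = 0x3c9 (EL2h):              mode = 0x9 >= 0x8 and AArch64,
 *                                          so pstate is forced to
 *                                          PSR_MODE_EL2h | PSR_IL_BIT.
 *      pstate = 0x1da (AArch32 Hyp):       PSR_MODE32_BIT is set, so it is
 *                                          left alone; the ERET itself then
 *                                          fails as an illegal return.
 */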

void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_el1_state(ctxt);
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_user_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}

void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);

void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);

void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
        spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
        spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
        spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

        sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
        sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
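
/*
 * The state handled above only exists while the guest's EL1 is AArch32:
 * the four banked SPSRs (abt/und/irq/fiq) plus the AArch32 DACR and IFSR,
 * which the architecture exposes to AArch64 EL2 as dacr32_el2/ifsr32_el2.
 * DBGVCR32_EL2 is debug state: on VHE it is always switched, while on
 * non-VHE it is only worth the extra traffic once the debug registers are
 * already dirty.
 */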

void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
        write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
        write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
        write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

        write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
        write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}

/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        __sysreg_save_user_state(host_ctxt);

        /*
         * Load guest EL1 and user state.
         *
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_user_state(guest_ctxt);
        __sysreg_restore_el1_state(guest_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = true;

        activate_traps_vhe_load(vcpu);
}
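
/*
 * A rough sketch of the consequence of deferred loading, assuming the
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() accessors elsewhere in KVM:
 * once sysregs_loaded_on_cpu is true, the in-memory copies in
 * vcpu->arch.ctxt.sys_regs are stale, so any accessor has to read or
 * write the live hardware registers instead, until kvm_vcpu_put_sysregs()
 * syncs the copies back.
 */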

/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running
 * the VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        deactivate_traps_vhe_put();

        __sysreg_save_el1_state(guest_ctxt);
        __sysreg_save_user_state(guest_ctxt);
        __sysreg32_save_state(vcpu);

        /* Restore host user state */
        __sysreg_restore_user_state(host_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = false;
}
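
/*
 * The load/put pair is deliberately asymmetric for the 32-bit state: on
 * load the AArch32 registers must go first because of the Cortex-A57/A72
 * errata noted above, whereas on put no such constraint is documented and
 * the guest state is simply saved before the host's user state returns.
 */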

void __hyp_text __kvm_enable_ssbs(void)
{
        u64 tmp;

        asm volatile(
        "mrs    %0, sctlr_el2\n"
        "orr    %0, %0, %1\n"
        "msr    sctlr_el2, %0"
        : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}
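
/*
 * For reference, the function above is a read-modify-write of SCTLR_EL2
 * setting DSSBS (bit 44), so PSTATE.SSBS defaults to 1 (speculative store
 * bypass permitted) on exceptions taken to EL2.  It is __hyp_text because
 * SCTLR_EL2 is only accessible at EL2, so non-VHE systems must run it in
 * the hyp mapping.  A plain-C sketch of the same operation (illustrative,
 * not what this file compiles):
 *
 *      write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_DSSBS, sctlr_el2);
 *
 * The "L" constraint asks the compiler for a constant encodable as a
 * logical-instruction immediate, letting the orr take %1 directly.
 */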