GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/svm/svm.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>
#include <linux/string_choices.h>
#include <linux/mutex.h>

#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>

#include <trace/events/ipi.h>

#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

static bool erratum_383_found __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added in the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed number
 *	of cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the pause
 *	filter threshold, then the internal pause count is reloaded from the
 *	VMCB and execution continues. If the elapsed cycle count is less than
 *	the pause filter threshold, then the internal pause count is
 *	decremented. If the count value is less than zero and PAUSE intercept
 *	is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *	supported and the pause filter threshold field is set to zero, the
 *	filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, 0444);

/* enable/disable Next RIP Save */
int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

module_param(enable_device_posted_irqs, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);


bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

bool vnmi = true;
module_param(vnmi, bool, 0444);

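/*
 * Most of the parameters above use mode 0444, i.e. they are read-only via
 * sysfs and are typically set when the module is loaded, for example:
 *
 *	modprobe kvm_amd npt=1 nested=1 vnmi=1
 *
 * or on the kernel command line as kvm_amd.<param>=<value>.
 */
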
static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

DEFINE_PER_CPU(struct svm_cpu_data, svm_data);

static DEFINE_MUTEX(vmcb_dump_mutex);

/*
 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
int tsc_aux_uret_slot __ro_after_init = -1;

192
static int get_npt_level(void)
193
{
194
#ifdef CONFIG_X86_64
195
return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
196
#else
197
return PT32E_ROOT_LEVEL;
198
#endif
199
}
200
201
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
202
{
203
struct vcpu_svm *svm = to_svm(vcpu);
204
u64 old_efer = vcpu->arch.efer;
205
vcpu->arch.efer = efer;
206
207
if (!npt_enabled) {
208
/* Shadow paging assumes NX to be available. */
209
efer |= EFER_NX;
210
211
if (!(efer & EFER_LMA))
212
efer &= ~EFER_LME;
213
}
214
215
if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
216
if (!(efer & EFER_SVME)) {
217
svm_leave_nested(vcpu);
218
svm_set_gif(svm, true);
219
/* #GP intercept is still needed for vmware backdoor */
220
if (!enable_vmware_backdoor)
221
clr_exception_intercept(svm, GP_VECTOR);
222
223
/*
224
* Free the nested guest state, unless we are in SMM.
225
* In this case we will return to the nested guest
226
* as soon as we leave SMM.
227
*/
228
if (!is_smm(vcpu))
229
svm_free_nested(svm);
230
231
} else {
232
int ret = svm_allocate_nested(svm);
233
234
if (ret) {
235
vcpu->arch.efer = old_efer;
236
return ret;
237
}
238
239
/*
240
* Never intercept #GP for SEV guests, KVM can't
241
* decrypt guest memory to workaround the erratum.
242
*/
243
if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
244
set_exception_intercept(svm, GP_VECTOR);
245
}
246
}
247
248
svm->vmcb->save.efer = efer | EFER_SVME;
249
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
250
return 0;
251
}
252
253
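/*
 * SVM tracks a single interrupt shadow bit in int_state, so an STI shadow
 * cannot be distinguished from a MOV SS shadow: report both flags to common
 * KVM code when the bit is set, and set the bit for either flag.
 */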
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
254
{
255
struct vcpu_svm *svm = to_svm(vcpu);
256
u32 ret = 0;
257
258
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
259
ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
260
return ret;
261
}
262
263
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
264
{
265
struct vcpu_svm *svm = to_svm(vcpu);
266
267
if (mask == 0)
268
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
269
else
270
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
271
272
}
273
274
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
275
bool commit_side_effects)
276
{
277
struct vcpu_svm *svm = to_svm(vcpu);
278
unsigned long old_rflags;
279
280
/*
281
* SEV-ES does not expose the next RIP. The RIP update is controlled by
282
* the type of exit and the #VC handler in the guest.
283
*/
284
if (sev_es_guest(vcpu->kvm))
285
goto done;
286
287
if (nrips && svm->vmcb->control.next_rip != 0) {
288
WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
289
svm->next_rip = svm->vmcb->control.next_rip;
290
}
291
292
if (!svm->next_rip) {
293
if (unlikely(!commit_side_effects))
294
old_rflags = svm->vmcb->save.rflags;
295
296
if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
297
return 0;
298
299
if (unlikely(!commit_side_effects))
300
svm->vmcb->save.rflags = old_rflags;
301
} else {
302
kvm_rip_write(vcpu, svm->next_rip);
303
}
304
305
done:
306
if (likely(commit_side_effects))
307
svm_set_interrupt_shadow(vcpu, 0);
308
309
return 1;
310
}
311
312
static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
313
{
314
return __svm_skip_emulated_instruction(vcpu, true);
315
}
316
317
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
318
{
319
unsigned long rip, old_rip = kvm_rip_read(vcpu);
320
struct vcpu_svm *svm = to_svm(vcpu);
321
322
/*
323
* Due to architectural shortcomings, the CPU doesn't always provide
324
* NextRIP, e.g. if KVM intercepted an exception that occurred while
325
* the CPU was vectoring an INTO/INT3 in the guest. Temporarily skip
326
* the instruction even if NextRIP is supported to acquire the next
327
* RIP so that it can be shoved into the NextRIP field, otherwise
328
* hardware will fail to advance guest RIP during event injection.
329
* Drop the exception/interrupt if emulation fails and effectively
330
* retry the instruction, it's the least awful option. If NRIPS is
331
* in use, the skip must not commit any side effects such as clearing
332
* the interrupt shadow or RFLAGS.RF.
333
*/
334
if (!__svm_skip_emulated_instruction(vcpu, !nrips))
335
return -EIO;
336
337
rip = kvm_rip_read(vcpu);
338
339
/*
340
* Save the injection information, even when using next_rip, as the
341
* VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
342
* doesn't complete due to a VM-Exit occurring while the CPU is
343
* vectoring the event. Decoding the instruction isn't guaranteed to
344
* work as there may be no backing instruction, e.g. if the event is
345
* being injected by L1 for L2, or if the guest is patching INT3 into
346
* a different instruction.
347
*/
348
svm->soft_int_injected = true;
349
svm->soft_int_csbase = svm->vmcb->save.cs.base;
350
svm->soft_int_old_rip = old_rip;
351
svm->soft_int_next_rip = rip;
352
353
if (nrips)
354
kvm_rip_write(vcpu, old_rip);
355
356
if (static_cpu_has(X86_FEATURE_NRIPS))
357
svm->vmcb->control.next_rip = rip;
358
359
return 0;
360
}
361
362
static void svm_inject_exception(struct kvm_vcpu *vcpu)
363
{
364
struct kvm_queued_exception *ex = &vcpu->arch.exception;
365
struct vcpu_svm *svm = to_svm(vcpu);
366
367
kvm_deliver_exception_payload(vcpu, ex);
368
369
if (kvm_exception_is_soft(ex->vector) &&
370
svm_update_soft_interrupt_rip(vcpu))
371
return;
372
373
svm->vmcb->control.event_inj = ex->vector
374
| SVM_EVTINJ_VALID
375
| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
376
| SVM_EVTINJ_TYPE_EXEPT;
377
svm->vmcb->control.event_inj_err = ex->error_code;
378
}
379
380
static void svm_init_erratum_383(void)
381
{
382
u64 val;
383
384
if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
385
return;
386
387
/* Use _safe variants to not break nested virtualization */
388
if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
389
return;
390
391
val |= (1ULL << 47);
392
393
native_write_msr_safe(MSR_AMD64_DC_CFG, val);
394
395
erratum_383_found = true;
396
}
397
398
static void svm_init_osvw(struct kvm_vcpu *vcpu)
399
{
400
/*
401
* Guests should see errata 400 and 415 as fixed (assuming that
402
* HLT and IO instructions are intercepted).
403
*/
404
vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
405
vcpu->arch.osvw.status = osvw_status & ~(6ULL);
406
407
/*
408
* By increasing VCPU's osvw.length to 3 we are telling the guest that
409
* all osvw.status bits inside that length, including bit 0 (which is
410
* reserved for erratum 298), are valid. However, if host processor's
411
* osvw_len is 0 then osvw_status[0] carries no information. We need to
412
* be conservative here and therefore we tell the guest that erratum 298
413
* is present (because we really don't know).
414
*/
415
if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
416
vcpu->arch.osvw.status |= 1;
417
}
418
419
static bool __kvm_is_svm_supported(void)
420
{
421
int cpu = smp_processor_id();
422
struct cpuinfo_x86 *c = &cpu_data(cpu);
423
424
if (c->x86_vendor != X86_VENDOR_AMD &&
425
c->x86_vendor != X86_VENDOR_HYGON) {
426
pr_err("CPU %d isn't AMD or Hygon\n", cpu);
427
return false;
428
}
429
430
if (!cpu_has(c, X86_FEATURE_SVM)) {
431
pr_err("SVM not supported by CPU %d\n", cpu);
432
return false;
433
}
434
435
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
436
pr_info("KVM is unsupported when running as an SEV guest\n");
437
return false;
438
}
439
440
return true;
441
}
442
443
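/*
 * Disable migration so that smp_processor_id() and cpu_data() in
 * __kvm_is_svm_supported() refer to the same CPU for the whole check.
 */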
static bool kvm_is_svm_supported(void)
444
{
445
bool supported;
446
447
migrate_disable();
448
supported = __kvm_is_svm_supported();
449
migrate_enable();
450
451
return supported;
452
}
453
454
static int svm_check_processor_compat(void)
455
{
456
if (!__kvm_is_svm_supported())
457
return -EIO;
458
459
return 0;
460
}
461
462
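/*
 * The last value written to MSR_AMD64_TSC_RATIO is cached per CPU so that
 * redundant WRMSRs are skipped when the multiplier hasn't changed.
 */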
static void __svm_write_tsc_multiplier(u64 multiplier)
463
{
464
if (multiplier == __this_cpu_read(current_tsc_ratio))
465
return;
466
467
wrmsrq(MSR_AMD64_TSC_RATIO, multiplier);
468
__this_cpu_write(current_tsc_ratio, multiplier);
469
}
470
471
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
472
{
473
return &sd->save_area->host_sev_es_save;
474
}
475
476
static inline void kvm_cpu_svm_disable(void)
477
{
478
uint64_t efer;
479
480
wrmsrq(MSR_VM_HSAVE_PA, 0);
481
rdmsrq(MSR_EFER, efer);
482
if (efer & EFER_SVME) {
483
/*
484
* Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
485
* NMI aren't blocked.
486
*/
487
stgi();
488
wrmsrq(MSR_EFER, efer & ~EFER_SVME);
489
}
490
}
491
492
static void svm_emergency_disable_virtualization_cpu(void)
493
{
494
kvm_rebooting = true;
495
496
kvm_cpu_svm_disable();
497
}
498
499
static void svm_disable_virtualization_cpu(void)
500
{
501
/* Make sure we clean up behind us */
502
if (tsc_scaling)
503
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
504
505
kvm_cpu_svm_disable();
506
507
amd_pmu_disable_virt();
508
}
509
510
static int svm_enable_virtualization_cpu(void)
511
{
512
513
struct svm_cpu_data *sd;
514
uint64_t efer;
515
int me = raw_smp_processor_id();
516
517
rdmsrq(MSR_EFER, efer);
518
if (efer & EFER_SVME)
519
return -EBUSY;
520
521
sd = per_cpu_ptr(&svm_data, me);
522
sd->asid_generation = 1;
523
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
524
sd->next_asid = sd->max_asid + 1;
525
sd->min_asid = max_sev_asid + 1;
526
527
wrmsrq(MSR_EFER, efer | EFER_SVME);
528
529
wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
530
531
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
532
/*
533
* Set the default value, even if we don't use TSC scaling
534
* to avoid having a stale value in the MSR.
535
*/
536
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
537
}
538
539
540
/*
541
* Get OSVW bits.
542
*
543
* Note that it is possible to have a system with mixed processor
544
* revisions and therefore different OSVW bits. If bits are not the same
545
* on different processors then choose the worst case (i.e. if erratum
546
* is present on one processor and not on another then assume that the
547
* erratum is present everywhere).
548
*/
549
if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
550
u64 len, status = 0;
551
int err;
552
553
err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
554
if (!err)
555
err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
556
557
if (err)
558
osvw_status = osvw_len = 0;
559
else {
560
if (len < osvw_len)
561
osvw_len = len;
562
osvw_status |= status;
563
osvw_status &= (1ULL << osvw_len) - 1;
564
}
565
} else
566
osvw_status = osvw_len = 0;
567
568
svm_init_erratum_383();
569
570
amd_pmu_enable_virt();
571
572
return 0;
573
}
574
575
static void svm_cpu_uninit(int cpu)
576
{
577
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
578
579
if (!sd->save_area)
580
return;
581
582
kfree(sd->sev_vmcbs);
583
__free_page(__sme_pa_to_page(sd->save_area_pa));
584
sd->save_area_pa = 0;
585
sd->save_area = NULL;
586
}
587
588
static int svm_cpu_init(int cpu)
589
{
590
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
591
struct page *save_area_page;
592
int ret = -ENOMEM;
593
594
memset(sd, 0, sizeof(struct svm_cpu_data));
595
save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
596
if (!save_area_page)
597
return ret;
598
599
ret = sev_cpu_init(sd);
600
if (ret)
601
goto free_save_area;
602
603
sd->save_area = page_address(save_area_page);
604
sd->save_area_pa = __sme_page_pa(save_area_page);
605
return 0;
606
607
free_save_area:
608
__free_page(save_area_page);
609
return ret;
610
611
}
612
613
static void set_dr_intercepts(struct vcpu_svm *svm)
614
{
615
struct vmcb *vmcb = svm->vmcb01.ptr;
616
617
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
618
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
619
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
620
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
621
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
622
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
623
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
624
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
625
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
626
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
627
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
628
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
629
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
630
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
631
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
632
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
633
634
recalc_intercepts(svm);
635
}
636
637
static void clr_dr_intercepts(struct vcpu_svm *svm)
638
{
639
struct vmcb *vmcb = svm->vmcb01.ptr;
640
641
vmcb->control.intercepts[INTERCEPT_DR] = 0;
642
643
recalc_intercepts(svm);
644
}
645
646
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
647
{
648
/*
649
* For non-nested case:
650
* If the L01 MSR bitmap does not intercept the MSR, then we need to
651
* save it.
652
*
653
* For nested case:
654
* If the L02 MSR bitmap does not intercept the MSR, then we need to
655
* save it.
656
*/
657
void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
658
to_svm(vcpu)->msrpm;
659
660
return svm_test_msr_bitmap_write(msrpm, msr);
661
}
662
663
void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
664
{
665
struct vcpu_svm *svm = to_svm(vcpu);
666
void *msrpm = svm->msrpm;
667
668
/* Don't disable interception for MSRs userspace wants to handle. */
669
if (type & MSR_TYPE_R) {
670
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
671
svm_clear_msr_bitmap_read(msrpm, msr);
672
else
673
svm_set_msr_bitmap_read(msrpm, msr);
674
}
675
676
if (type & MSR_TYPE_W) {
677
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
678
svm_clear_msr_bitmap_write(msrpm, msr);
679
else
680
svm_set_msr_bitmap_write(msrpm, msr);
681
}
682
683
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
684
svm->nested.force_msr_bitmap_recalc = true;
685
}
686
687
void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
688
{
689
unsigned int order = get_order(size);
690
struct page *pages = alloc_pages(gfp_mask, order);
691
void *pm;
692
693
if (!pages)
694
return NULL;
695
696
/*
697
* Set all bits in the permissions map so that all MSR and I/O accesses
698
* are intercepted by default.
699
*/
700
pm = page_address(pages);
701
memset(pm, 0xff, PAGE_SIZE * (1 << order));
702
703
return pm;
704
}
705
706
static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
707
{
708
bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
709
710
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
711
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
712
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTFROMIP, MSR_TYPE_RW, intercept);
713
svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTINTTOIP, MSR_TYPE_RW, intercept);
714
715
if (sev_es_guest(vcpu->kvm))
716
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
717
}
718
719
void svm_vcpu_free_msrpm(void *msrpm)
720
{
721
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
722
}
723
724
static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
725
{
726
struct vcpu_svm *svm = to_svm(vcpu);
727
728
svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
729
svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
730
731
#ifdef CONFIG_X86_64
732
svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
733
svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
734
svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
735
svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
736
svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
737
svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
738
#endif
739
740
if (lbrv)
741
svm_recalc_lbr_msr_intercepts(vcpu);
742
743
if (cpu_feature_enabled(X86_FEATURE_IBPB))
744
svm_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
745
!guest_has_pred_cmd_msr(vcpu));
746
747
if (cpu_feature_enabled(X86_FEATURE_FLUSH_L1D))
748
svm_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
749
!guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
750
751
/*
752
* Disable interception of SPEC_CTRL if KVM doesn't need to manually
753
* context switch the MSR (SPEC_CTRL is virtualized by the CPU), or if
754
* the guest has a non-zero SPEC_CTRL value, i.e. is likely actively
755
* using SPEC_CTRL.
756
*/
757
if (cpu_feature_enabled(X86_FEATURE_V_SPEC_CTRL))
758
svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
759
!guest_has_spec_ctrl_msr(vcpu));
760
else
761
svm_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
762
!svm->spec_ctrl);
763
764
/*
765
* Intercept SYSENTER_EIP and SYSENTER_ESP when emulating an Intel CPU,
766
* as AMD hardware only store 32 bits, whereas Intel CPUs track 64 bits.
767
*/
768
svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW,
769
guest_cpuid_is_intel_compatible(vcpu));
770
svm_set_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW,
771
guest_cpuid_is_intel_compatible(vcpu));
772
773
if (kvm_aperfmperf_in_guest(vcpu->kvm)) {
774
svm_disable_intercept_for_msr(vcpu, MSR_IA32_APERF, MSR_TYPE_R);
775
svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
776
}
777
778
if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
779
bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
780
781
svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled);
782
svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled);
783
svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled);
784
svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled);
785
svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled);
786
svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled);
787
}
788
789
if (sev_es_guest(vcpu->kvm))
790
sev_es_recalc_msr_intercepts(vcpu);
791
792
/*
793
* x2APIC intercepts are modified on-demand and cannot be filtered by
794
* userspace.
795
*/
796
}
797
798
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
799
{
800
to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
801
to_vmcb->save.br_from = from_vmcb->save.br_from;
802
to_vmcb->save.br_to = from_vmcb->save.br_to;
803
to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from;
804
to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to;
805
806
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
807
}
808
809
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
810
{
811
struct vcpu_svm *svm = to_svm(vcpu);
812
813
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
814
svm_recalc_lbr_msr_intercepts(vcpu);
815
816
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
817
if (is_guest_mode(vcpu))
818
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
819
}
820
821
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
822
{
823
struct vcpu_svm *svm = to_svm(vcpu);
824
825
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
826
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
827
svm_recalc_lbr_msr_intercepts(vcpu);
828
829
/*
830
* Move the LBR msrs back to the vmcb01 to avoid copying them
831
* on nested guest entries.
832
*/
833
if (is_guest_mode(vcpu))
834
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
835
}
836
837
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
838
{
839
/*
840
* If LBR virtualization is disabled, the LBR MSRs are always kept in
841
* vmcb01. If LBR virtualization is enabled and L1 is running VMs of
842
* its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
843
*/
844
return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
845
svm->vmcb01.ptr;
846
}
847
848
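/*
 * Enable LBR virtualization if the guest has set DEBUGCTL.LBR, or if L1 has
 * enabled LBRV for L2 in vmcb12; disable it otherwise.
 */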
void svm_update_lbrv(struct kvm_vcpu *vcpu)
849
{
850
struct vcpu_svm *svm = to_svm(vcpu);
851
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
852
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
853
(is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
854
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
855
856
if (enable_lbrv == current_enable_lbrv)
857
return;
858
859
if (enable_lbrv)
860
svm_enable_lbrv(vcpu);
861
else
862
svm_disable_lbrv(vcpu);
863
}
864
865
void disable_nmi_singlestep(struct vcpu_svm *svm)
866
{
867
svm->nmi_singlestep = false;
868
869
if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
870
/* Clear our flags if they were not set by the guest */
871
if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
872
svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
873
if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
874
svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
875
}
876
}
877
878
static void grow_ple_window(struct kvm_vcpu *vcpu)
879
{
880
struct vcpu_svm *svm = to_svm(vcpu);
881
struct vmcb_control_area *control = &svm->vmcb->control;
882
int old = control->pause_filter_count;
883
884
if (kvm_pause_in_guest(vcpu->kvm))
885
return;
886
887
control->pause_filter_count = __grow_ple_window(old,
888
pause_filter_count,
889
pause_filter_count_grow,
890
pause_filter_count_max);
891
892
if (control->pause_filter_count != old) {
893
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
894
trace_kvm_ple_window_update(vcpu->vcpu_id,
895
control->pause_filter_count, old);
896
}
897
}
898
899
static void shrink_ple_window(struct kvm_vcpu *vcpu)
900
{
901
struct vcpu_svm *svm = to_svm(vcpu);
902
struct vmcb_control_area *control = &svm->vmcb->control;
903
int old = control->pause_filter_count;
904
905
if (kvm_pause_in_guest(vcpu->kvm))
906
return;
907
908
control->pause_filter_count =
909
__shrink_ple_window(old,
910
pause_filter_count,
911
pause_filter_count_shrink,
912
pause_filter_count);
913
if (control->pause_filter_count != old) {
914
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
915
trace_kvm_ple_window_update(vcpu->vcpu_id,
916
control->pause_filter_count, old);
917
}
918
}
919
920
static void svm_hardware_unsetup(void)
921
{
922
int cpu;
923
924
sev_hardware_unsetup();
925
926
for_each_possible_cpu(cpu)
927
svm_cpu_uninit(cpu);
928
929
__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
930
iopm_base = 0;
931
}
932
933
static void init_seg(struct vmcb_seg *seg)
934
{
935
seg->selector = 0;
936
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
937
SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
938
seg->limit = 0xffff;
939
seg->base = 0;
940
}
941
942
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
943
{
944
seg->selector = 0;
945
seg->attrib = SVM_SELECTOR_P_MASK | type;
946
seg->limit = 0xffff;
947
seg->base = 0;
948
}
949
950
static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
951
{
952
struct vcpu_svm *svm = to_svm(vcpu);
953
954
return svm->nested.ctl.tsc_offset;
955
}
956
957
static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
958
{
959
struct vcpu_svm *svm = to_svm(vcpu);
960
961
return svm->tsc_ratio_msr;
962
}
963
964
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
965
{
966
struct vcpu_svm *svm = to_svm(vcpu);
967
968
svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
969
svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
970
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
971
}
972
973
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
974
{
975
preempt_disable();
976
if (to_svm(vcpu)->guest_state_loaded)
977
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
978
preempt_enable();
979
}
980
981
/* Evaluate instruction intercepts that depend on guest CPUID features. */
982
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
983
{
984
struct vcpu_svm *svm = to_svm(vcpu);
985
986
/*
987
* Intercept INVPCID if shadow paging is enabled to sync/free shadow
988
* roots, or if INVPCID is disabled in the guest to inject #UD.
989
*/
990
if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
991
if (!npt_enabled ||
992
!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_INVPCID))
993
svm_set_intercept(svm, INTERCEPT_INVPCID);
994
else
995
svm_clr_intercept(svm, INTERCEPT_INVPCID);
996
}
997
998
if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
999
if (guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP))
1000
svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1001
else
1002
svm_set_intercept(svm, INTERCEPT_RDTSCP);
1003
}
1004
1005
if (guest_cpuid_is_intel_compatible(vcpu)) {
1006
svm_set_intercept(svm, INTERCEPT_VMLOAD);
1007
svm_set_intercept(svm, INTERCEPT_VMSAVE);
1008
svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1009
} else {
1010
/*
1011
* If hardware supports Virtual VMLOAD VMSAVE then enable it
1012
* in VMCB and clear intercepts to avoid #VMEXIT.
1013
*/
1014
if (vls) {
1015
svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1016
svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1017
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1018
}
1019
}
1020
}
1021
1022
static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
1023
{
1024
svm_recalc_instruction_intercepts(vcpu);
1025
svm_recalc_msr_intercepts(vcpu);
1026
}
1027
1028
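/*
 * Establish the baseline VMCB state for a vCPU: the default instruction,
 * CR, DR and exception intercepts, the architectural reset state of the
 * segment registers and descriptor tables, and the optional features
 * (NPT, PLE, AVIC, vNMI, vGIF) that are enabled for this VM.
 */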
static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
1029
{
1030
struct vcpu_svm *svm = to_svm(vcpu);
1031
struct vmcb *vmcb = svm->vmcb01.ptr;
1032
struct vmcb_control_area *control = &vmcb->control;
1033
struct vmcb_save_area *save = &vmcb->save;
1034
1035
svm_set_intercept(svm, INTERCEPT_CR0_READ);
1036
svm_set_intercept(svm, INTERCEPT_CR3_READ);
1037
svm_set_intercept(svm, INTERCEPT_CR4_READ);
1038
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1039
svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1040
svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1041
if (!kvm_vcpu_apicv_active(vcpu))
1042
svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1043
1044
set_dr_intercepts(svm);
1045
1046
set_exception_intercept(svm, PF_VECTOR);
1047
set_exception_intercept(svm, UD_VECTOR);
1048
set_exception_intercept(svm, MC_VECTOR);
1049
set_exception_intercept(svm, AC_VECTOR);
1050
set_exception_intercept(svm, DB_VECTOR);
1051
/*
1052
* Guest access to VMware backdoor ports could legitimately
1053
* trigger #GP because of TSS I/O permission bitmap.
1054
* We intercept those #GP and allow access to them anyway
1055
* as VMware does.
1056
*/
1057
if (enable_vmware_backdoor)
1058
set_exception_intercept(svm, GP_VECTOR);
1059
1060
svm_set_intercept(svm, INTERCEPT_INTR);
1061
svm_set_intercept(svm, INTERCEPT_NMI);
1062
1063
if (intercept_smi)
1064
svm_set_intercept(svm, INTERCEPT_SMI);
1065
1066
svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1067
svm_set_intercept(svm, INTERCEPT_RDPMC);
1068
svm_set_intercept(svm, INTERCEPT_CPUID);
1069
svm_set_intercept(svm, INTERCEPT_INVD);
1070
svm_set_intercept(svm, INTERCEPT_INVLPG);
1071
svm_set_intercept(svm, INTERCEPT_INVLPGA);
1072
svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1073
svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1074
svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1075
svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1076
svm_set_intercept(svm, INTERCEPT_VMRUN);
1077
svm_set_intercept(svm, INTERCEPT_VMMCALL);
1078
svm_set_intercept(svm, INTERCEPT_VMLOAD);
1079
svm_set_intercept(svm, INTERCEPT_VMSAVE);
1080
svm_set_intercept(svm, INTERCEPT_STGI);
1081
svm_set_intercept(svm, INTERCEPT_CLGI);
1082
svm_set_intercept(svm, INTERCEPT_SKINIT);
1083
svm_set_intercept(svm, INTERCEPT_WBINVD);
1084
svm_set_intercept(svm, INTERCEPT_XSETBV);
1085
svm_set_intercept(svm, INTERCEPT_RDPRU);
1086
svm_set_intercept(svm, INTERCEPT_RSM);
1087
1088
if (!kvm_mwait_in_guest(vcpu->kvm)) {
1089
svm_set_intercept(svm, INTERCEPT_MONITOR);
1090
svm_set_intercept(svm, INTERCEPT_MWAIT);
1091
}
1092
1093
if (!kvm_hlt_in_guest(vcpu->kvm)) {
1094
if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT))
1095
svm_set_intercept(svm, INTERCEPT_IDLE_HLT);
1096
else
1097
svm_set_intercept(svm, INTERCEPT_HLT);
1098
}
1099
1100
control->iopm_base_pa = iopm_base;
1101
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1102
control->int_ctl = V_INTR_MASKING_MASK;
1103
1104
init_seg(&save->es);
1105
init_seg(&save->ss);
1106
init_seg(&save->ds);
1107
init_seg(&save->fs);
1108
init_seg(&save->gs);
1109
1110
save->cs.selector = 0xf000;
1111
save->cs.base = 0xffff0000;
1112
/* Executable/Readable Code Segment */
1113
save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1114
SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1115
save->cs.limit = 0xffff;
1116
1117
save->gdtr.base = 0;
1118
save->gdtr.limit = 0xffff;
1119
save->idtr.base = 0;
1120
save->idtr.limit = 0xffff;
1121
1122
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1123
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1124
1125
if (npt_enabled) {
1126
/* Setup VMCB for Nested Paging */
1127
control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1128
svm_clr_intercept(svm, INTERCEPT_INVLPG);
1129
clr_exception_intercept(svm, PF_VECTOR);
1130
svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1131
svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1132
save->g_pat = vcpu->arch.pat;
1133
save->cr3 = 0;
1134
}
1135
svm->current_vmcb->asid_generation = 0;
1136
svm->asid = 0;
1137
1138
svm->nested.vmcb12_gpa = INVALID_GPA;
1139
svm->nested.last_vmcb12_gpa = INVALID_GPA;
1140
1141
if (!kvm_pause_in_guest(vcpu->kvm)) {
1142
control->pause_filter_count = pause_filter_count;
1143
if (pause_filter_thresh)
1144
control->pause_filter_thresh = pause_filter_thresh;
1145
svm_set_intercept(svm, INTERCEPT_PAUSE);
1146
} else {
1147
svm_clr_intercept(svm, INTERCEPT_PAUSE);
1148
}
1149
1150
if (kvm_vcpu_apicv_active(vcpu))
1151
avic_init_vmcb(svm, vmcb);
1152
1153
if (vnmi)
1154
svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1155
1156
if (vgif) {
1157
svm_clr_intercept(svm, INTERCEPT_STGI);
1158
svm_clr_intercept(svm, INTERCEPT_CLGI);
1159
svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1160
}
1161
1162
if (vcpu->kvm->arch.bus_lock_detection_enabled)
1163
svm_set_intercept(svm, INTERCEPT_BUSLOCK);
1164
1165
if (sev_guest(vcpu->kvm))
1166
sev_init_vmcb(svm, init_event);
1167
1168
svm_hv_init_vmcb(vmcb);
1169
1170
kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
1171
1172
vmcb_mark_all_dirty(vmcb);
1173
1174
enable_gif(svm);
1175
}
1176
1177
static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1178
{
1179
struct vcpu_svm *svm = to_svm(vcpu);
1180
1181
svm_init_osvw(vcpu);
1182
1183
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
1184
vcpu->arch.microcode_version = 0x01000065;
1185
svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1186
1187
svm->nmi_masked = false;
1188
svm->awaiting_iret_completion = false;
1189
}
1190
1191
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1192
{
1193
struct vcpu_svm *svm = to_svm(vcpu);
1194
1195
svm->spec_ctrl = 0;
1196
svm->virt_spec_ctrl = 0;
1197
1198
init_vmcb(vcpu, init_event);
1199
1200
if (!init_event)
1201
__svm_vcpu_reset(vcpu);
1202
}
1203
1204
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1205
{
1206
svm->current_vmcb = target_vmcb;
1207
svm->vmcb = target_vmcb->ptr;
1208
}
1209
1210
static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1211
{
1212
struct vcpu_svm *svm;
1213
struct page *vmcb01_page;
1214
int err;
1215
1216
BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1217
svm = to_svm(vcpu);
1218
1219
err = -ENOMEM;
1220
vmcb01_page = snp_safe_alloc_page();
1221
if (!vmcb01_page)
1222
goto out;
1223
1224
err = sev_vcpu_create(vcpu);
1225
if (err)
1226
goto error_free_vmcb_page;
1227
1228
err = avic_init_vcpu(svm);
1229
if (err)
1230
goto error_free_sev;
1231
1232
svm->msrpm = svm_vcpu_alloc_msrpm();
1233
if (!svm->msrpm) {
1234
err = -ENOMEM;
1235
goto error_free_sev;
1236
}
1237
1238
svm->x2avic_msrs_intercepted = true;
1239
1240
svm->vmcb01.ptr = page_address(vmcb01_page);
1241
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1242
svm_switch_vmcb(svm, &svm->vmcb01);
1243
1244
svm->guest_state_loaded = false;
1245
1246
return 0;
1247
1248
error_free_sev:
1249
sev_free_vcpu(vcpu);
1250
error_free_vmcb_page:
1251
__free_page(vmcb01_page);
1252
out:
1253
return err;
1254
}
1255
1256
static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1257
{
1258
struct vcpu_svm *svm = to_svm(vcpu);
1259
1260
WARN_ON_ONCE(!list_empty(&svm->ir_list));
1261
1262
svm_leave_nested(vcpu);
1263
svm_free_nested(svm);
1264
1265
sev_free_vcpu(vcpu);
1266
1267
__free_page(__sme_pa_to_page(svm->vmcb01.pa));
1268
svm_vcpu_free_msrpm(svm->msrpm);
1269
}
1270
1271
#ifdef CONFIG_CPU_MITIGATIONS
1272
static DEFINE_SPINLOCK(srso_lock);
1273
static atomic_t srso_nr_vms;
1274
1275
static void svm_srso_clear_bp_spec_reduce(void *ign)
1276
{
1277
struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
1278
1279
if (!sd->bp_spec_reduce_set)
1280
return;
1281
1282
msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1283
sd->bp_spec_reduce_set = false;
1284
}
1285
1286
static void svm_srso_vm_destroy(void)
1287
{
1288
if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1289
return;
1290
1291
if (atomic_dec_return(&srso_nr_vms))
1292
return;
1293
1294
guard(spinlock)(&srso_lock);
1295
1296
/*
1297
* Verify a new VM didn't come along, acquire the lock, and increment
1298
* the count before this task acquired the lock.
1299
*/
1300
if (atomic_read(&srso_nr_vms))
1301
return;
1302
1303
on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
1304
}
1305
1306
static void svm_srso_vm_init(void)
1307
{
1308
if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
1309
return;
1310
1311
/*
1312
* Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
1313
* transition, i.e. destroying the last VM, is fully complete, e.g. so
1314
* that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
1315
*/
1316
if (atomic_inc_not_zero(&srso_nr_vms))
1317
return;
1318
1319
guard(spinlock)(&srso_lock);
1320
1321
atomic_inc(&srso_nr_vms);
1322
}
1323
#else
1324
static void svm_srso_vm_init(void) { }
1325
static void svm_srso_vm_destroy(void) { }
1326
#endif
1327
1328
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1329
{
1330
struct vcpu_svm *svm = to_svm(vcpu);
1331
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
1332
1333
if (sev_es_guest(vcpu->kvm))
1334
sev_es_unmap_ghcb(svm);
1335
1336
if (svm->guest_state_loaded)
1337
return;
1338
1339
/*
1340
* Save additional host state that will be restored on VMEXIT (sev-es)
1341
* or subsequent vmload of host save area.
1342
*/
1343
vmsave(sd->save_area_pa);
1344
if (sev_es_guest(vcpu->kvm))
1345
sev_es_prepare_switch_to_guest(svm, sev_es_host_save_area(sd));
1346
1347
if (tsc_scaling)
1348
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1349
1350
/*
1351
* TSC_AUX is always virtualized (context switched by hardware) for
1352
* SEV-ES guests when the feature is available. For non-SEV-ES guests,
1353
* context switch TSC_AUX via the user_return MSR infrastructure (not
1354
* all CPUs support TSC_AUX virtualization).
1355
*/
1356
if (likely(tsc_aux_uret_slot >= 0) &&
1357
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
1358
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1359
1360
if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
1361
!sd->bp_spec_reduce_set) {
1362
sd->bp_spec_reduce_set = true;
1363
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
1364
}
1365
svm->guest_state_loaded = true;
1366
}
1367
1368
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1369
{
1370
to_svm(vcpu)->guest_state_loaded = false;
1371
}
1372
1373
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1374
{
1375
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
1376
shrink_ple_window(vcpu);
1377
1378
if (kvm_vcpu_apicv_active(vcpu))
1379
avic_vcpu_load(vcpu, cpu);
1380
}
1381
1382
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1383
{
1384
if (kvm_vcpu_apicv_active(vcpu))
1385
avic_vcpu_put(vcpu);
1386
1387
svm_prepare_host_switch(vcpu);
1388
1389
++vcpu->stat.host_state_reload;
1390
}
1391
1392
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1393
{
1394
struct vcpu_svm *svm = to_svm(vcpu);
1395
unsigned long rflags = svm->vmcb->save.rflags;
1396
1397
if (svm->nmi_singlestep) {
1398
/* Hide our flags if they were not set by the guest */
1399
if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1400
rflags &= ~X86_EFLAGS_TF;
1401
if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1402
rflags &= ~X86_EFLAGS_RF;
1403
}
1404
return rflags;
1405
}
1406
1407
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1408
{
1409
if (to_svm(vcpu)->nmi_singlestep)
1410
rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1411
1412
/*
1413
* Any change of EFLAGS.VM is accompanied by a reload of SS
1414
* (caused by either a task switch or an inter-privilege IRET),
1415
* so we do not need to update the CPL here.
1416
*/
1417
to_svm(vcpu)->vmcb->save.rflags = rflags;
1418
}
1419
1420
static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1421
{
1422
struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1423
1424
return sev_es_guest(vcpu->kvm)
1425
? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1426
: kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1427
}
1428
1429
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1430
{
1431
kvm_register_mark_available(vcpu, reg);
1432
1433
switch (reg) {
1434
case VCPU_EXREG_PDPTR:
1435
/*
1436
* When !npt_enabled, mmu->pdptrs[] is already available since
1437
* it is always updated per SDM when moving to CRs.
1438
*/
1439
if (npt_enabled)
1440
load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1441
break;
1442
default:
1443
KVM_BUG_ON(1, vcpu->kvm);
1444
}
1445
}
1446
1447
static void svm_set_vintr(struct vcpu_svm *svm)
1448
{
1449
struct vmcb_control_area *control;
1450
1451
/*
1452
* The following fields are ignored when AVIC is enabled
1453
*/
1454
WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1455
1456
svm_set_intercept(svm, INTERCEPT_VINTR);
1457
1458
/*
1459
* Recalculating intercepts may have cleared the VINTR intercept. If
1460
* V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1461
* for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1462
* Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1463
* interrupts will never be unblocked while L2 is running.
1464
*/
1465
if (!svm_is_intercept(svm, INTERCEPT_VINTR))
1466
return;
1467
1468
/*
1469
* This is just a dummy VINTR to actually cause a vmexit to happen.
1470
* Actual injection of virtual interrupts happens through EVENTINJ.
1471
*/
1472
control = &svm->vmcb->control;
1473
control->int_vector = 0x0;
1474
control->int_ctl &= ~V_INTR_PRIO_MASK;
1475
control->int_ctl |= V_IRQ_MASK |
1476
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1477
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1478
}
1479
1480
static void svm_clear_vintr(struct vcpu_svm *svm)
1481
{
1482
svm_clr_intercept(svm, INTERCEPT_VINTR);
1483
1484
/* Drop int_ctl fields related to VINTR injection. */
1485
svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1486
if (is_guest_mode(&svm->vcpu)) {
1487
svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1488
1489
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1490
(svm->nested.ctl.int_ctl & V_TPR_MASK));
1491
1492
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1493
V_IRQ_INJECTION_BITS_MASK;
1494
1495
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1496
}
1497
1498
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1499
}
1500
1501
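/*
 * Return the VMCB segment field backing @seg. FS, GS, TR and LDTR are
 * always tracked in vmcb01, even while L2 is active (these are the fields
 * handled by VMLOAD/VMSAVE); the remaining segments live in the current
 * VMCB.
 */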
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1502
{
1503
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1504
struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
1505
1506
switch (seg) {
1507
case VCPU_SREG_CS: return &save->cs;
1508
case VCPU_SREG_DS: return &save->ds;
1509
case VCPU_SREG_ES: return &save->es;
1510
case VCPU_SREG_FS: return &save01->fs;
1511
case VCPU_SREG_GS: return &save01->gs;
1512
case VCPU_SREG_SS: return &save->ss;
1513
case VCPU_SREG_TR: return &save01->tr;
1514
case VCPU_SREG_LDTR: return &save01->ldtr;
1515
}
1516
BUG();
1517
return NULL;
1518
}
1519
1520
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1521
{
1522
struct vmcb_seg *s = svm_seg(vcpu, seg);
1523
1524
return s->base;
1525
}
1526
1527
static void svm_get_segment(struct kvm_vcpu *vcpu,
1528
struct kvm_segment *var, int seg)
1529
{
1530
struct vmcb_seg *s = svm_seg(vcpu, seg);
1531
1532
var->base = s->base;
1533
var->limit = s->limit;
1534
var->selector = s->selector;
1535
var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1536
var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1537
var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1538
var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1539
var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1540
var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1541
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1542
1543
/*
1544
* AMD CPUs circa 2014 track the G bit for all segments except CS.
1545
* However, the SVM spec states that the G bit is not observed by the
1546
* CPU, and some VMware virtual CPUs drop the G bit for all segments.
1547
* So let's synthesize a legal G bit for all segments; this helps
1548
* running KVM nested. It also helps cross-vendor migration, because
1549
* Intel's vmentry has a check on the 'G' bit.
1550
*/
1551
var->g = s->limit > 0xfffff;
1552
1553
/*
1554
* AMD's VMCB does not have an explicit unusable field, so emulate it
1555
* for cross vendor migration purposes by "not present"
1556
*/
1557
var->unusable = !var->present;
1558
1559
switch (seg) {
1560
case VCPU_SREG_TR:
1561
/*
1562
* Work around a bug where the busy flag in the tr selector
1563
* isn't exposed
1564
*/
1565
var->type |= 0x2;
1566
break;
1567
case VCPU_SREG_DS:
1568
case VCPU_SREG_ES:
1569
case VCPU_SREG_FS:
1570
case VCPU_SREG_GS:
1571
/*
1572
* The accessed bit must always be set in the segment
1573
* descriptor cache, although it can be cleared in the
1574
* descriptor, the cached bit always remains at 1. Since
1575
* Intel has a check on this, set it here to support
1576
* cross-vendor migration.
1577
*/
1578
if (!var->unusable)
1579
var->type |= 0x1;
1580
break;
1581
case VCPU_SREG_SS:
1582
/*
1583
* On AMD CPUs sometimes the DB bit in the segment
1584
* descriptor is left as 1, although the whole segment has
1585
* been made unusable. Clear it here to pass an Intel VMX
1586
* entry check when cross vendor migrating.
1587
*/
1588
if (var->unusable)
1589
var->db = 0;
1590
/* This is symmetric with svm_set_segment() */
1591
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1592
break;
1593
}
1594
}
1595
1596
static int svm_get_cpl(struct kvm_vcpu *vcpu)
1597
{
1598
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1599
1600
return save->cpl;
1601
}
1602
1603
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1604
{
1605
struct kvm_segment cs;
1606
1607
svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1608
*db = cs.db;
1609
*l = cs.l;
1610
}
1611
1612
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1613
{
1614
struct vcpu_svm *svm = to_svm(vcpu);
1615
1616
dt->size = svm->vmcb->save.idtr.limit;
1617
dt->address = svm->vmcb->save.idtr.base;
1618
}
1619
1620
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1621
{
1622
struct vcpu_svm *svm = to_svm(vcpu);
1623
1624
svm->vmcb->save.idtr.limit = dt->size;
1625
svm->vmcb->save.idtr.base = dt->address;
1626
vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1627
}
1628
1629
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1630
{
1631
struct vcpu_svm *svm = to_svm(vcpu);
1632
1633
dt->size = svm->vmcb->save.gdtr.limit;
1634
dt->address = svm->vmcb->save.gdtr.base;
1635
}
1636
1637
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1638
{
1639
struct vcpu_svm *svm = to_svm(vcpu);
1640
1641
svm->vmcb->save.gdtr.limit = dt->size;
1642
svm->vmcb->save.gdtr.base = dt->address;
1643
vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1644
}
1645
1646
static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1647
{
1648
struct vcpu_svm *svm = to_svm(vcpu);
1649
1650
/*
1651
* For guests that don't set guest_state_protected, the cr3 update is
1652
* handled via kvm_mmu_load() while entering the guest. For guests
1653
* that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1654
* VMCB save area now, since the save area will become the initial
1655
* contents of the VMSA, and future VMCB save area updates won't be
1656
* seen.
1657
*/
1658
if (sev_es_guest(vcpu->kvm)) {
1659
svm->vmcb->save.cr3 = cr3;
1660
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1661
}
1662
}
1663
1664
static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1665
{
1666
return true;
1667
}
1668
1669
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1670
{
1671
struct vcpu_svm *svm = to_svm(vcpu);
1672
u64 hcr0 = cr0;
1673
bool old_paging = is_paging(vcpu);
1674
1675
#ifdef CONFIG_X86_64
1676
if (vcpu->arch.efer & EFER_LME) {
1677
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1678
vcpu->arch.efer |= EFER_LMA;
1679
if (!vcpu->arch.guest_state_protected)
1680
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1681
}
1682
1683
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1684
vcpu->arch.efer &= ~EFER_LMA;
1685
if (!vcpu->arch.guest_state_protected)
1686
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1687
}
1688
}
1689
#endif
1690
vcpu->arch.cr0 = cr0;
1691
1692
if (!npt_enabled) {
1693
hcr0 |= X86_CR0_PG | X86_CR0_WP;
1694
if (old_paging != is_paging(vcpu))
1695
svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1696
}
1697
1698
/*
1699
* re-enable caching here because the QEMU bios
1700
* does not do it - this results in some delay at
1701
* reboot
1702
*/
1703
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1704
hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1705
1706
svm->vmcb->save.cr0 = hcr0;
1707
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1708
1709
/*
1710
* SEV-ES guests must always keep the CR intercepts cleared. CR
1711
* tracking is done using the CR write traps.
1712
*/
1713
if (sev_es_guest(vcpu->kvm))
1714
return;
1715
1716
if (hcr0 == cr0) {
1717
/* Selective CR0 write remains on. */
1718
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1719
svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1720
} else {
1721
svm_set_intercept(svm, INTERCEPT_CR0_READ);
1722
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1723
}
1724
}
1725
1726
static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1727
{
1728
return true;
1729
}
1730
1731
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1732
{
1733
unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1734
unsigned long old_cr4 = vcpu->arch.cr4;
1735
1736
vcpu->arch.cr4 = cr4;
1737
if (!npt_enabled) {
1738
cr4 |= X86_CR4_PAE;
1739
1740
if (!is_paging(vcpu))
1741
cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1742
}
1743
cr4 |= host_cr4_mce;
1744
to_svm(vcpu)->vmcb->save.cr4 = cr4;
1745
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1746
1747
if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1748
vcpu->arch.cpuid_dynamic_bits_dirty = true;
1749
}
1750
1751
static void svm_set_segment(struct kvm_vcpu *vcpu,
1752
struct kvm_segment *var, int seg)
1753
{
1754
struct vcpu_svm *svm = to_svm(vcpu);
1755
struct vmcb_seg *s = svm_seg(vcpu, seg);
1756
1757
s->base = var->base;
1758
s->limit = var->limit;
1759
s->selector = var->selector;
1760
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1761
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1762
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1763
s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1764
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1765
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1766
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1767
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1768
1769
/*
1770
* This is always accurate, except if SYSRET returned to a segment
1771
* with SS.DPL != 3. Intel does not have this quirk, and always
1772
* forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1773
* would entail passing the CPL to userspace and back.
1774
*/
1775
if (seg == VCPU_SREG_SS)
1776
/* This is symmetric with svm_get_segment() */
1777
svm->vmcb->save.cpl = (var->dpl & 3);
1778
1779
vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1780
}
1781
1782
static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1783
{
1784
struct vcpu_svm *svm = to_svm(vcpu);
1785
1786
clr_exception_intercept(svm, BP_VECTOR);
1787
1788
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1789
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1790
set_exception_intercept(svm, BP_VECTOR);
1791
}
1792
}
1793
1794
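/*
 * Hand out the next available ASID. When the pool is exhausted, bump the
 * generation, request a flush of all ASIDs and restart allocation at
 * min_asid (ASIDs below min_asid are reserved for SEV guests).
 */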
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1795
{
1796
if (sd->next_asid > sd->max_asid) {
1797
++sd->asid_generation;
1798
sd->next_asid = sd->min_asid;
1799
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1800
vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1801
}
1802
1803
svm->current_vmcb->asid_generation = sd->asid_generation;
1804
svm->asid = sd->next_asid++;
1805
}
1806
1807
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1808
{
1809
struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1810
1811
if (vcpu->arch.guest_state_protected)
1812
return;
1813
1814
if (unlikely(value != vmcb->save.dr6)) {
1815
vmcb->save.dr6 = value;
1816
vmcb_mark_dirty(vmcb, VMCB_DR);
1817
}
1818
}
1819
1820
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1821
{
1822
struct vcpu_svm *svm = to_svm(vcpu);
1823
1824
if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm)))
1825
return;
1826
1827
get_debugreg(vcpu->arch.db[0], 0);
1828
get_debugreg(vcpu->arch.db[1], 1);
1829
get_debugreg(vcpu->arch.db[2], 2);
1830
get_debugreg(vcpu->arch.db[3], 3);
1831
/*
1832
* We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1833
* because db_interception might need it. We can do it before vmentry.
1834
*/
1835
vcpu->arch.dr6 = svm->vmcb->save.dr6;
1836
vcpu->arch.dr7 = svm->vmcb->save.dr7;
1837
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1838
set_dr_intercepts(svm);
1839
}
1840
1841
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1842
{
1843
struct vcpu_svm *svm = to_svm(vcpu);
1844
1845
if (vcpu->arch.guest_state_protected)
1846
return;
1847
1848
svm->vmcb->save.dr7 = value;
1849
vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1850
}
1851
1852
static int pf_interception(struct kvm_vcpu *vcpu)
1853
{
1854
struct vcpu_svm *svm = to_svm(vcpu);
1855
1856
u64 fault_address = svm->vmcb->control.exit_info_2;
1857
u64 error_code = svm->vmcb->control.exit_info_1;
1858
1859
return kvm_handle_page_fault(vcpu, error_code, fault_address,
1860
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1861
svm->vmcb->control.insn_bytes : NULL,
1862
svm->vmcb->control.insn_len);
1863
}
1864
1865
static int npf_interception(struct kvm_vcpu *vcpu)
1866
{
1867
struct vcpu_svm *svm = to_svm(vcpu);
1868
int rc;
1869
1870
u64 fault_address = svm->vmcb->control.exit_info_2;
1871
u64 error_code = svm->vmcb->control.exit_info_1;
1872
1873
/*
1874
* WARN if hardware generates a fault with an error code that collides
1875
* with KVM-defined synthetic flags. Clear the flags and continue on,
1876
* i.e. don't terminate the VM, as KVM can't possibly be relying on a
1877
* flag that KVM doesn't know about.
1878
*/
1879
if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
1880
error_code &= ~PFERR_SYNTHETIC_MASK;
1881
1882
if (sev_snp_guest(vcpu->kvm) && (error_code & PFERR_GUEST_ENC_MASK))
1883
error_code |= PFERR_PRIVATE_ACCESS;
1884
1885
trace_kvm_page_fault(vcpu, fault_address, error_code);
1886
rc = kvm_mmu_page_fault(vcpu, fault_address, error_code,
1887
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1888
svm->vmcb->control.insn_bytes : NULL,
1889
svm->vmcb->control.insn_len);
1890
1891
if (rc > 0 && error_code & PFERR_GUEST_RMP_MASK)
1892
sev_handle_rmp_fault(vcpu, fault_address, error_code);
1893
1894
return rc;
1895
}
1896
1897
static int db_interception(struct kvm_vcpu *vcpu)
1898
{
1899
struct kvm_run *kvm_run = vcpu->run;
1900
struct vcpu_svm *svm = to_svm(vcpu);
1901
1902
if (!(vcpu->guest_debug &
1903
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1904
!svm->nmi_singlestep) {
1905
u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
1906
kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
1907
return 1;
1908
}
1909
1910
if (svm->nmi_singlestep) {
1911
disable_nmi_singlestep(svm);
1912
/* Make sure we check for pending NMIs upon entry */
1913
kvm_make_request(KVM_REQ_EVENT, vcpu);
1914
}
1915
1916
if (vcpu->guest_debug &
1917
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
1918
kvm_run->exit_reason = KVM_EXIT_DEBUG;
1919
kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
1920
kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
1921
kvm_run->debug.arch.pc =
1922
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1923
kvm_run->debug.arch.exception = DB_VECTOR;
1924
return 0;
1925
}
1926
1927
return 1;
1928
}
1929
1930
static int bp_interception(struct kvm_vcpu *vcpu)
1931
{
1932
struct vcpu_svm *svm = to_svm(vcpu);
1933
struct kvm_run *kvm_run = vcpu->run;
1934
1935
kvm_run->exit_reason = KVM_EXIT_DEBUG;
1936
kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1937
kvm_run->debug.arch.exception = BP_VECTOR;
1938
return 0;
1939
}
1940
1941
static int ud_interception(struct kvm_vcpu *vcpu)
1942
{
1943
return handle_ud(vcpu);
1944
}
1945
1946
static int ac_interception(struct kvm_vcpu *vcpu)
1947
{
1948
kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
1949
return 1;
1950
}
1951
1952
static bool is_erratum_383(void)
1953
{
1954
int i;
1955
u64 value;
1956
1957
if (!erratum_383_found)
1958
return false;
1959
1960
if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
1961
return false;
1962
1963
/* Bit 62 may or may not be set for this mce */
1964
value &= ~(1ULL << 62);
1965
1966
if (value != 0xb600000000010015ULL)
1967
return false;
1968
1969
/* Clear MCi_STATUS registers */
1970
for (i = 0; i < 6; ++i)
1971
native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
1972
1973
if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
1974
value &= ~(1ULL << 2);
1975
native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
1976
}
1977
1978
/* Flush tlb to evict multi-match entries */
1979
__flush_tlb_all();
1980
1981
return true;
1982
}
1983
1984
static void svm_handle_mce(struct kvm_vcpu *vcpu)
1985
{
1986
if (is_erratum_383()) {
1987
/*
1988
* Erratum 383 triggered. Guest state is corrupt so kill the
1989
* guest.
1990
*/
1991
pr_err("Guest triggered AMD Erratum 383\n");
1992
1993
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1994
1995
return;
1996
}
1997
1998
/*
1999
* On an #MC intercept the MCE handler is not called automatically in
2000
* the host. So do it by hand here.
2001
*/
2002
kvm_machine_check();
2003
}
2004
2005
static int mc_interception(struct kvm_vcpu *vcpu)
2006
{
2007
return 1;
2008
}
2009
2010
static int shutdown_interception(struct kvm_vcpu *vcpu)
2011
{
2012
struct kvm_run *kvm_run = vcpu->run;
2013
struct vcpu_svm *svm = to_svm(vcpu);
2014
2015
2016
/*
2017
* VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
2018
* the VMCB in a known good state. Unfortunately, KVM doesn't have
2019
* KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2020
* userspace. At a platform view, INIT is acceptable behavior as
2021
* there exist bare metal platforms that automatically INIT the CPU
2022
* in response to shutdown.
2023
*
2024
* The VM save area for SEV-ES guests has already been encrypted so it
2025
* cannot be reinitialized, i.e. synthesizing INIT is futile.
2026
*/
2027
if (!sev_es_guest(vcpu->kvm)) {
2028
clear_page(svm->vmcb);
2029
#ifdef CONFIG_KVM_SMM
2030
if (is_smm(vcpu))
2031
kvm_smm_changed(vcpu, false);
2032
#endif
2033
kvm_vcpu_reset(vcpu, true);
2034
}
2035
2036
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2037
return 0;
2038
}
2039
2040
static int io_interception(struct kvm_vcpu *vcpu)
2041
{
2042
struct vcpu_svm *svm = to_svm(vcpu);
2043
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2044
int size, in, string;
2045
unsigned port;
2046
2047
++vcpu->stat.io_exits;
2048
string = (io_info & SVM_IOIO_STR_MASK) != 0;
2049
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2050
port = io_info >> 16;
2051
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2052
2053
if (string) {
2054
if (sev_es_guest(vcpu->kvm))
2055
return sev_es_string_io(svm, size, port, in);
2056
else
2057
return kvm_emulate_instruction(vcpu, 0);
2058
}
2059
2060
svm->next_rip = svm->vmcb->control.exit_info_2;
2061
2062
return kvm_fast_pio(vcpu, size, port, in);
2063
}
2064
2065
static int nmi_interception(struct kvm_vcpu *vcpu)
2066
{
2067
return 1;
2068
}
2069
2070
static int smi_interception(struct kvm_vcpu *vcpu)
2071
{
2072
return 1;
2073
}
2074
2075
static int intr_interception(struct kvm_vcpu *vcpu)
2076
{
2077
++vcpu->stat.irq_exits;
2078
return 1;
2079
}
2080
2081
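/*
 * VMLOAD and VMSAVE operate on the VMCB at the guest physical address in
 * RAX.  Emulate them by copying the affected state between the guest's
 * VMCB and the vCPU's current VMCB.
 */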
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2082
{
2083
struct vcpu_svm *svm = to_svm(vcpu);
2084
struct vmcb *vmcb12;
2085
struct kvm_host_map map;
2086
int ret;
2087
2088
if (nested_svm_check_permissions(vcpu))
2089
return 1;
2090
2091
ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2092
if (ret) {
2093
if (ret == -EINVAL)
2094
kvm_inject_gp(vcpu, 0);
2095
return 1;
2096
}
2097
2098
vmcb12 = map.hva;
2099
2100
ret = kvm_skip_emulated_instruction(vcpu);
2101
2102
if (vmload) {
2103
svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2104
svm->sysenter_eip_hi = 0;
2105
svm->sysenter_esp_hi = 0;
2106
} else {
2107
svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2108
}
2109
2110
kvm_vcpu_unmap(vcpu, &map);
2111
2112
return ret;
2113
}
2114
2115
static int vmload_interception(struct kvm_vcpu *vcpu)
2116
{
2117
return vmload_vmsave_interception(vcpu, true);
2118
}
2119
2120
static int vmsave_interception(struct kvm_vcpu *vcpu)
2121
{
2122
return vmload_vmsave_interception(vcpu, false);
2123
}
2124
2125
static int vmrun_interception(struct kvm_vcpu *vcpu)
2126
{
2127
if (nested_svm_check_permissions(vcpu))
2128
return 1;
2129
2130
return nested_svm_vmrun(vcpu);
2131
}
2132
2133
enum {
2134
NONE_SVM_INSTR,
2135
SVM_INSTR_VMRUN,
2136
SVM_INSTR_VMLOAD,
2137
SVM_INSTR_VMSAVE,
2138
};
2139
2140
/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
2141
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2142
{
2143
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2144
2145
if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2146
return NONE_SVM_INSTR;
2147
2148
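/*
 * VMRUN, VMLOAD and VMSAVE share the two-byte 0F 01 opcode and are
 * distinguished solely by the ModRM byte.
 */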
switch (ctxt->modrm) {
2149
case 0xd8: /* VMRUN */
2150
return SVM_INSTR_VMRUN;
2151
case 0xda: /* VMLOAD */
2152
return SVM_INSTR_VMLOAD;
2153
case 0xdb: /* VMSAVE */
2154
return SVM_INSTR_VMSAVE;
2155
default:
2156
break;
2157
}
2158
2159
return NONE_SVM_INSTR;
2160
}
2161
2162
static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2163
{
2164
const int guest_mode_exit_codes[] = {
2165
[SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2166
[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2167
[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2168
};
2169
int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
2170
[SVM_INSTR_VMRUN] = vmrun_interception,
2171
[SVM_INSTR_VMLOAD] = vmload_interception,
2172
[SVM_INSTR_VMSAVE] = vmsave_interception,
2173
};
2174
struct vcpu_svm *svm = to_svm(vcpu);
2175
int ret;
2176
2177
if (is_guest_mode(vcpu)) {
2178
/* Returns '1' or -errno on failure, '0' on success. */
2179
ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
2180
if (ret)
2181
return ret;
2182
return 1;
2183
}
2184
return svm_instr_handlers[opcode](vcpu);
2185
}
2186
2187
/*
2188
* #GP handling code. Note that #GP can be triggered under the following two
2189
* cases:
2190
* 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2191
* some AMD CPUs when the address in EAX falls in a reserved memory
2192
* region (e.g. SMM memory on the host).
2193
* 2) VMware backdoor
2194
*/
2195
static int gp_interception(struct kvm_vcpu *vcpu)
2196
{
2197
struct vcpu_svm *svm = to_svm(vcpu);
2198
u32 error_code = svm->vmcb->control.exit_info_1;
2199
int opcode;
2200
2201
/* Both #GP cases have zero error_code */
2202
if (error_code)
2203
goto reinject;
2204
2205
/* Decode the instruction for usage later */
2206
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2207
goto reinject;
2208
2209
opcode = svm_instr_opcode(vcpu);
2210
2211
if (opcode == NONE_SVM_INSTR) {
2212
if (!enable_vmware_backdoor)
2213
goto reinject;
2214
2215
/*
2216
* VMware backdoor emulation on #GP interception only handles
2217
* IN{S}, OUT{S}, and RDPMC.
2218
*/
2219
if (!is_guest_mode(vcpu))
2220
return kvm_emulate_instruction(vcpu,
2221
EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2222
} else {
2223
/* All SVM instructions expect page aligned RAX */
2224
if (svm->vmcb->save.rax & ~PAGE_MASK)
2225
goto reinject;
2226
2227
return emulate_svm_instr(vcpu, opcode);
2228
}
2229
2230
reinject:
2231
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2232
return 1;
2233
}
2234
2235
void svm_set_gif(struct vcpu_svm *svm, bool value)
2236
{
2237
if (value) {
2238
/*
2239
* If VGIF is enabled, the STGI intercept is only added to
2240
* detect the opening of the SMI/NMI window; remove it now.
2241
* Likewise, clear the VINTR intercept, we will set it
2242
* again while processing KVM_REQ_EVENT if needed.
2243
*/
2244
if (vgif)
2245
svm_clr_intercept(svm, INTERCEPT_STGI);
2246
if (svm_is_intercept(svm, INTERCEPT_VINTR))
2247
svm_clear_vintr(svm);
2248
2249
enable_gif(svm);
2250
if (svm->vcpu.arch.smi_pending ||
2251
svm->vcpu.arch.nmi_pending ||
2252
kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2253
kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2254
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2255
} else {
2256
disable_gif(svm);
2257
2258
/*
2259
* After a CLGI no interrupts should come. But if vGIF is
2260
* in use, we still rely on the VINTR intercept (rather than
2261
* STGI) to detect an open interrupt window.
2262
*/
2263
if (!vgif)
2264
svm_clear_vintr(svm);
2265
}
2266
}
2267
2268
static int stgi_interception(struct kvm_vcpu *vcpu)
2269
{
2270
int ret;
2271
2272
if (nested_svm_check_permissions(vcpu))
2273
return 1;
2274
2275
ret = kvm_skip_emulated_instruction(vcpu);
2276
svm_set_gif(to_svm(vcpu), true);
2277
return ret;
2278
}
2279
2280
static int clgi_interception(struct kvm_vcpu *vcpu)
2281
{
2282
int ret;
2283
2284
if (nested_svm_check_permissions(vcpu))
2285
return 1;
2286
2287
ret = kvm_skip_emulated_instruction(vcpu);
2288
svm_set_gif(to_svm(vcpu), false);
2289
return ret;
2290
}
2291
2292
static int invlpga_interception(struct kvm_vcpu *vcpu)
2293
{
2294
gva_t gva = kvm_rax_read(vcpu);
2295
u32 asid = kvm_rcx_read(vcpu);
2296
2297
/* FIXME: Handle an address size prefix. */
2298
if (!is_long_mode(vcpu))
2299
gva = (u32)gva;
2300
2301
trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2302
2303
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2304
kvm_mmu_invlpg(vcpu, gva);
2305
2306
return kvm_skip_emulated_instruction(vcpu);
2307
}
2308
2309
static int skinit_interception(struct kvm_vcpu *vcpu)
2310
{
2311
trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2312
2313
kvm_queue_exception(vcpu, UD_VECTOR);
2314
return 1;
2315
}
2316
2317
static int task_switch_interception(struct kvm_vcpu *vcpu)
2318
{
2319
struct vcpu_svm *svm = to_svm(vcpu);
2320
u16 tss_selector;
2321
int reason;
2322
int int_type = svm->vmcb->control.exit_int_info &
2323
SVM_EXITINTINFO_TYPE_MASK;
2324
int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2325
uint32_t type =
2326
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2327
uint32_t idt_v =
2328
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2329
bool has_error_code = false;
2330
u32 error_code = 0;
2331
2332
tss_selector = (u16)svm->vmcb->control.exit_info_1;
2333
2334
if (svm->vmcb->control.exit_info_2 &
2335
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2336
reason = TASK_SWITCH_IRET;
2337
else if (svm->vmcb->control.exit_info_2 &
2338
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2339
reason = TASK_SWITCH_JMP;
2340
else if (idt_v)
2341
reason = TASK_SWITCH_GATE;
2342
else
2343
reason = TASK_SWITCH_CALL;
2344
2345
if (reason == TASK_SWITCH_GATE) {
2346
switch (type) {
2347
case SVM_EXITINTINFO_TYPE_NMI:
2348
vcpu->arch.nmi_injected = false;
2349
break;
2350
case SVM_EXITINTINFO_TYPE_EXEPT:
2351
if (svm->vmcb->control.exit_info_2 &
2352
(1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2353
has_error_code = true;
2354
error_code =
2355
(u32)svm->vmcb->control.exit_info_2;
2356
}
2357
kvm_clear_exception_queue(vcpu);
2358
break;
2359
case SVM_EXITINTINFO_TYPE_INTR:
2360
case SVM_EXITINTINFO_TYPE_SOFT:
2361
kvm_clear_interrupt_queue(vcpu);
2362
break;
2363
default:
2364
break;
2365
}
2366
}
2367
2368
if (reason != TASK_SWITCH_GATE ||
2369
int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2370
(int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2371
(int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2372
if (!svm_skip_emulated_instruction(vcpu))
2373
return 0;
2374
}
2375
2376
if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2377
int_vec = -1;
2378
2379
return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2380
has_error_code, error_code);
2381
}
2382
2383
static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2384
{
2385
if (!sev_es_guest(svm->vcpu.kvm))
2386
svm_clr_intercept(svm, INTERCEPT_IRET);
2387
}
2388
2389
static void svm_set_iret_intercept(struct vcpu_svm *svm)
2390
{
2391
if (!sev_es_guest(svm->vcpu.kvm))
2392
svm_set_intercept(svm, INTERCEPT_IRET);
2393
}
2394
2395
static int iret_interception(struct kvm_vcpu *vcpu)
2396
{
2397
struct vcpu_svm *svm = to_svm(vcpu);
2398
2399
WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
2400
2401
++vcpu->stat.nmi_window_exits;
2402
svm->awaiting_iret_completion = true;
2403
2404
svm_clr_iret_intercept(svm);
2405
svm->nmi_iret_rip = kvm_rip_read(vcpu);
2406
2407
kvm_make_request(KVM_REQ_EVENT, vcpu);
2408
return 1;
2409
}
2410
2411
static int invlpg_interception(struct kvm_vcpu *vcpu)
2412
{
2413
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2414
return kvm_emulate_instruction(vcpu, 0);
2415
2416
kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2417
return kvm_skip_emulated_instruction(vcpu);
2418
}
2419
2420
static int emulate_on_interception(struct kvm_vcpu *vcpu)
2421
{
2422
return kvm_emulate_instruction(vcpu, 0);
2423
}
2424
2425
static int rsm_interception(struct kvm_vcpu *vcpu)
2426
{
2427
return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2428
}
2429
2430
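/*
 * A nested (L1) hypervisor may use the "selective" CR0 write intercept,
 * which only triggers on writes that change bits other than CR0.TS and
 * CR0.MP.  If L2 performs such a write, synthesize a CR0_SEL_WRITE exit
 * to L1 instead of handling the write in KVM.
 */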
static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2431
unsigned long val)
2432
{
2433
struct vcpu_svm *svm = to_svm(vcpu);
2434
unsigned long cr0 = vcpu->arch.cr0;
2435
bool ret = false;
2436
2437
if (!is_guest_mode(vcpu) ||
2438
(!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2439
return false;
2440
2441
cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2442
val &= ~SVM_CR0_SELECTIVE_MASK;
2443
2444
if (cr0 ^ val) {
2445
svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2446
ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2447
}
2448
2449
return ret;
2450
}
2451
2452
#define CR_VALID (1ULL << 63)
2453
2454
static int cr_interception(struct kvm_vcpu *vcpu)
2455
{
2456
struct vcpu_svm *svm = to_svm(vcpu);
2457
int reg, cr;
2458
unsigned long val;
2459
int err;
2460
2461
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2462
return emulate_on_interception(vcpu);
2463
2464
if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2465
return emulate_on_interception(vcpu);
2466
2467
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2468
if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2469
cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2470
else
2471
cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2472
2473
err = 0;
2474
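/*
 * CR write exit codes are offset by 16 from the corresponding read exit
 * codes (SVM_EXIT_WRITE_CR0 == SVM_EXIT_READ_CR0 + 16), so a value >= 16
 * indicates a "mov to CRn".
 */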
if (cr >= 16) { /* mov to cr */
2475
cr -= 16;
2476
val = kvm_register_read(vcpu, reg);
2477
trace_kvm_cr_write(cr, val);
2478
switch (cr) {
2479
case 0:
2480
if (!check_selective_cr0_intercepted(vcpu, val))
2481
err = kvm_set_cr0(vcpu, val);
2482
else
2483
return 1;
2484
2485
break;
2486
case 3:
2487
err = kvm_set_cr3(vcpu, val);
2488
break;
2489
case 4:
2490
err = kvm_set_cr4(vcpu, val);
2491
break;
2492
case 8:
2493
err = kvm_set_cr8(vcpu, val);
2494
break;
2495
default:
2496
WARN(1, "unhandled write to CR%d", cr);
2497
kvm_queue_exception(vcpu, UD_VECTOR);
2498
return 1;
2499
}
2500
} else { /* mov from cr */
2501
switch (cr) {
2502
case 0:
2503
val = kvm_read_cr0(vcpu);
2504
break;
2505
case 2:
2506
val = vcpu->arch.cr2;
2507
break;
2508
case 3:
2509
val = kvm_read_cr3(vcpu);
2510
break;
2511
case 4:
2512
val = kvm_read_cr4(vcpu);
2513
break;
2514
case 8:
2515
val = kvm_get_cr8(vcpu);
2516
break;
2517
default:
2518
WARN(1, "unhandled read from CR%d", cr);
2519
kvm_queue_exception(vcpu, UD_VECTOR);
2520
return 1;
2521
}
2522
kvm_register_write(vcpu, reg, val);
2523
trace_kvm_cr_read(cr, val);
2524
}
2525
return kvm_complete_insn_gp(vcpu, err);
2526
}
2527
2528
static int cr_trap(struct kvm_vcpu *vcpu)
2529
{
2530
struct vcpu_svm *svm = to_svm(vcpu);
2531
unsigned long old_value, new_value;
2532
unsigned int cr;
2533
int ret = 0;
2534
2535
new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2536
2537
cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2538
switch (cr) {
2539
case 0:
2540
old_value = kvm_read_cr0(vcpu);
2541
svm_set_cr0(vcpu, new_value);
2542
2543
kvm_post_set_cr0(vcpu, old_value, new_value);
2544
break;
2545
case 4:
2546
old_value = kvm_read_cr4(vcpu);
2547
svm_set_cr4(vcpu, new_value);
2548
2549
kvm_post_set_cr4(vcpu, old_value, new_value);
2550
break;
2551
case 8:
2552
ret = kvm_set_cr8(vcpu, new_value);
2553
break;
2554
default:
2555
WARN(1, "unhandled CR%d write trap", cr);
2556
kvm_queue_exception(vcpu, UD_VECTOR);
2557
return 1;
2558
}
2559
2560
return kvm_complete_insn_gp(vcpu, ret);
2561
}
2562
2563
static int dr_interception(struct kvm_vcpu *vcpu)
2564
{
2565
struct vcpu_svm *svm = to_svm(vcpu);
2566
int reg, dr;
2567
int err = 0;
2568
2569
/*
2570
* SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
2571
* for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
2572
*/
2573
if (sev_es_guest(vcpu->kvm))
2574
return 1;
2575
2576
if (vcpu->guest_debug == 0) {
2577
/*
2578
* No more DR vmexits; force a reload of the debug registers
2579
* and reenter on this instruction. The next vmexit will
2580
* retrieve the full state of the debug registers.
2581
*/
2582
clr_dr_intercepts(svm);
2583
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2584
return 1;
2585
}
2586
2587
if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2588
return emulate_on_interception(vcpu);
2589
2590
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2591
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2592
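/* As with the CR exits, DR write exit codes are offset by 16 from the reads. */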
if (dr >= 16) { /* mov to DRn */
2593
dr -= 16;
2594
err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
2595
} else {
2596
kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
2597
}
2598
2599
return kvm_complete_insn_gp(vcpu, err);
2600
}
2601
2602
static int cr8_write_interception(struct kvm_vcpu *vcpu)
2603
{
2604
int r;
2605
2606
u8 cr8_prev = kvm_get_cr8(vcpu);
2607
/* instruction emulation calls kvm_set_cr8() */
2608
r = cr_interception(vcpu);
2609
if (lapic_in_kernel(vcpu))
2610
return r;
2611
if (cr8_prev <= kvm_get_cr8(vcpu))
2612
return r;
2613
vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2614
return 0;
2615
}
2616
2617
static int efer_trap(struct kvm_vcpu *vcpu)
2618
{
2619
struct msr_data msr_info;
2620
int ret;
2621
2622
/*
2623
* Clear the EFER_SVME bit from EFER. The SVM code always sets this
2624
* bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2625
* whether the guest has X86_FEATURE_SVM - this avoids a failure if
2626
* the guest doesn't have X86_FEATURE_SVM.
2627
*/
2628
msr_info.host_initiated = false;
2629
msr_info.index = MSR_EFER;
2630
msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2631
ret = kvm_set_msr_common(vcpu, &msr_info);
2632
2633
return kvm_complete_insn_gp(vcpu, ret);
2634
}
2635
2636
static int svm_get_feature_msr(u32 msr, u64 *data)
2637
{
2638
*data = 0;
2639
2640
switch (msr) {
2641
case MSR_AMD64_DE_CFG:
2642
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2643
*data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2644
break;
2645
default:
2646
return KVM_MSR_RET_UNSUPPORTED;
2647
}
2648
2649
return 0;
2650
}
2651
2652
static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
2653
struct msr_data *msr_info)
2654
{
2655
return sev_es_guest(vcpu->kvm) && vcpu->arch.guest_state_protected &&
2656
msr_info->index != MSR_IA32_XSS &&
2657
!msr_write_intercepted(vcpu, msr_info->index);
2658
}
2659
2660
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2661
{
2662
struct vcpu_svm *svm = to_svm(vcpu);
2663
2664
if (sev_es_prevent_msr_access(vcpu, msr_info)) {
2665
msr_info->data = 0;
2666
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2667
}
2668
2669
switch (msr_info->index) {
2670
case MSR_AMD64_TSC_RATIO:
2671
if (!msr_info->host_initiated &&
2672
!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
2673
return 1;
2674
msr_info->data = svm->tsc_ratio_msr;
2675
break;
2676
case MSR_STAR:
2677
msr_info->data = svm->vmcb01.ptr->save.star;
2678
break;
2679
#ifdef CONFIG_X86_64
2680
case MSR_LSTAR:
2681
msr_info->data = svm->vmcb01.ptr->save.lstar;
2682
break;
2683
case MSR_CSTAR:
2684
msr_info->data = svm->vmcb01.ptr->save.cstar;
2685
break;
2686
case MSR_GS_BASE:
2687
msr_info->data = svm->vmcb01.ptr->save.gs.base;
2688
break;
2689
case MSR_FS_BASE:
2690
msr_info->data = svm->vmcb01.ptr->save.fs.base;
2691
break;
2692
case MSR_KERNEL_GS_BASE:
2693
msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2694
break;
2695
case MSR_SYSCALL_MASK:
2696
msr_info->data = svm->vmcb01.ptr->save.sfmask;
2697
break;
2698
#endif
2699
case MSR_IA32_SYSENTER_CS:
2700
msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2701
break;
2702
case MSR_IA32_SYSENTER_EIP:
2703
msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2704
if (guest_cpuid_is_intel_compatible(vcpu))
2705
msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2706
break;
2707
case MSR_IA32_SYSENTER_ESP:
2708
msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2709
if (guest_cpuid_is_intel_compatible(vcpu))
2710
msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2711
break;
2712
case MSR_IA32_S_CET:
2713
msr_info->data = svm->vmcb->save.s_cet;
2714
break;
2715
case MSR_IA32_INT_SSP_TAB:
2716
msr_info->data = svm->vmcb->save.isst_addr;
2717
break;
2718
case MSR_KVM_INTERNAL_GUEST_SSP:
2719
msr_info->data = svm->vmcb->save.ssp;
2720
break;
2721
case MSR_TSC_AUX:
2722
msr_info->data = svm->tsc_aux;
2723
break;
2724
case MSR_IA32_DEBUGCTLMSR:
2725
msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
2726
break;
2727
case MSR_IA32_LASTBRANCHFROMIP:
2728
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
2729
break;
2730
case MSR_IA32_LASTBRANCHTOIP:
2731
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
2732
break;
2733
case MSR_IA32_LASTINTFROMIP:
2734
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
2735
break;
2736
case MSR_IA32_LASTINTTOIP:
2737
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
2738
break;
2739
case MSR_VM_HSAVE_PA:
2740
msr_info->data = svm->nested.hsave_msr;
2741
break;
2742
case MSR_VM_CR:
2743
msr_info->data = svm->nested.vm_cr_msr;
2744
break;
2745
case MSR_IA32_SPEC_CTRL:
2746
if (!msr_info->host_initiated &&
2747
!guest_has_spec_ctrl_msr(vcpu))
2748
return 1;
2749
2750
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2751
msr_info->data = svm->vmcb->save.spec_ctrl;
2752
else
2753
msr_info->data = svm->spec_ctrl;
2754
break;
2755
case MSR_AMD64_VIRT_SPEC_CTRL:
2756
if (!msr_info->host_initiated &&
2757
!guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
2758
return 1;
2759
2760
msr_info->data = svm->virt_spec_ctrl;
2761
break;
2762
case MSR_F15H_IC_CFG: {
2763
2764
int family, model;
2765
2766
family = guest_cpuid_family(vcpu);
2767
model = guest_cpuid_model(vcpu);
2768
2769
if (family < 0 || model < 0)
2770
return kvm_get_msr_common(vcpu, msr_info);
2771
2772
msr_info->data = 0;
2773
2774
if (family == 0x15 &&
2775
(model >= 0x2 && model < 0x20))
2776
msr_info->data = 0x1E;
2777
}
2778
break;
2779
case MSR_AMD64_DE_CFG:
2780
msr_info->data = svm->msr_decfg;
2781
break;
2782
default:
2783
return kvm_get_msr_common(vcpu, msr_info);
2784
}
2785
return 0;
2786
}
2787
2788
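/*
 * For SEV-ES guests, a failed MSR access that came in via the GHCB
 * protocol is reported back to the guest by requesting a #GP through the
 * GHCB rather than through KVM's normal exception injection.
 */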
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2789
{
2790
struct vcpu_svm *svm = to_svm(vcpu);
2791
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2792
return kvm_complete_insn_gp(vcpu, err);
2793
2794
svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
2795
return 1;
2796
}
2797
2798
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2799
{
2800
struct vcpu_svm *svm = to_svm(vcpu);
2801
int svm_dis, chg_mask;
2802
2803
if (data & ~SVM_VM_CR_VALID_MASK)
2804
return 1;
2805
2806
chg_mask = SVM_VM_CR_VALID_MASK;
2807
2808
if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2809
chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2810
2811
svm->nested.vm_cr_msr &= ~chg_mask;
2812
svm->nested.vm_cr_msr |= (data & chg_mask);
2813
2814
svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2815
2816
/* check for svm_disable while efer.svme is set */
2817
if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2818
return 1;
2819
2820
return 0;
2821
}
2822
2823
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2824
{
2825
struct vcpu_svm *svm = to_svm(vcpu);
2826
int ret = 0;
2827
2828
u32 ecx = msr->index;
2829
u64 data = msr->data;
2830
2831
if (sev_es_prevent_msr_access(vcpu, msr))
2832
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
2833
2834
switch (ecx) {
2835
case MSR_AMD64_TSC_RATIO:
2836
2837
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
2838
2839
if (!msr->host_initiated)
2840
return 1;
2841
/*
2842
* In case TSC scaling is not enabled, always
2843
* leave this MSR at the default value.
2844
*
2845
* Due to a bug in qemu 6.2.0, it would try to set
2846
* this MSR to 0 if TSC scaling is not enabled.
2847
* Ignore this value as well.
2848
*/
2849
if (data != 0 && data != svm->tsc_ratio_msr)
2850
return 1;
2851
break;
2852
}
2853
2854
if (data & SVM_TSC_RATIO_RSVD)
2855
return 1;
2856
2857
svm->tsc_ratio_msr = data;
2858
2859
if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
2860
is_guest_mode(vcpu))
2861
nested_svm_update_tsc_ratio_msr(vcpu);
2862
2863
break;
2864
case MSR_IA32_CR_PAT:
2865
ret = kvm_set_msr_common(vcpu, msr);
2866
if (ret)
2867
break;
2868
2869
svm->vmcb01.ptr->save.g_pat = data;
2870
if (is_guest_mode(vcpu))
2871
nested_vmcb02_compute_g_pat(svm);
2872
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2873
break;
2874
case MSR_IA32_SPEC_CTRL:
2875
if (!msr->host_initiated &&
2876
!guest_has_spec_ctrl_msr(vcpu))
2877
return 1;
2878
2879
if (kvm_spec_ctrl_test_value(data))
2880
return 1;
2881
2882
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2883
svm->vmcb->save.spec_ctrl = data;
2884
else
2885
svm->spec_ctrl = data;
2886
if (!data)
2887
break;
2888
2889
/*
2890
* For non-nested:
2891
* When it's written (to non-zero) for the first time, pass
2892
* it through.
2893
*
2894
* For nested:
2895
* The handling of the MSR bitmap for L2 guests is done in
2896
* nested_svm_merge_msrpm().
2897
* We update the L1 MSR bit as well since it will end up
2898
* touching the MSR anyway now.
2899
*/
2900
svm_disable_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
2901
break;
2902
case MSR_AMD64_VIRT_SPEC_CTRL:
2903
if (!msr->host_initiated &&
2904
!guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD))
2905
return 1;
2906
2907
if (data & ~SPEC_CTRL_SSBD)
2908
return 1;
2909
2910
svm->virt_spec_ctrl = data;
2911
break;
2912
case MSR_STAR:
2913
svm->vmcb01.ptr->save.star = data;
2914
break;
2915
#ifdef CONFIG_X86_64
2916
case MSR_LSTAR:
2917
svm->vmcb01.ptr->save.lstar = data;
2918
break;
2919
case MSR_CSTAR:
2920
svm->vmcb01.ptr->save.cstar = data;
2921
break;
2922
case MSR_GS_BASE:
2923
svm->vmcb01.ptr->save.gs.base = data;
2924
break;
2925
case MSR_FS_BASE:
2926
svm->vmcb01.ptr->save.fs.base = data;
2927
break;
2928
case MSR_KERNEL_GS_BASE:
2929
svm->vmcb01.ptr->save.kernel_gs_base = data;
2930
break;
2931
case MSR_SYSCALL_MASK:
2932
svm->vmcb01.ptr->save.sfmask = data;
2933
break;
2934
#endif
2935
case MSR_IA32_SYSENTER_CS:
2936
svm->vmcb01.ptr->save.sysenter_cs = data;
2937
break;
2938
case MSR_IA32_SYSENTER_EIP:
2939
svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
2940
/*
2941
* We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
2942
* when we spoof an Intel vendor ID (for cross vendor migration).
2943
* In this case we use this intercept to track the high
2944
* 32 bit part of these msrs to support Intel's
2945
* implementation of SYSENTER/SYSEXIT.
2946
*/
2947
svm->sysenter_eip_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
2948
break;
2949
case MSR_IA32_SYSENTER_ESP:
2950
svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
2951
svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
2952
break;
2953
case MSR_IA32_S_CET:
2954
svm->vmcb->save.s_cet = data;
2955
vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
2956
break;
2957
case MSR_IA32_INT_SSP_TAB:
2958
svm->vmcb->save.isst_addr = data;
2959
vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
2960
break;
2961
case MSR_KVM_INTERNAL_GUEST_SSP:
2962
svm->vmcb->save.ssp = data;
2963
vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
2964
break;
2965
case MSR_TSC_AUX:
2966
/*
2967
* TSC_AUX is always virtualized for SEV-ES guests when the
2968
* feature is available. The user return MSR support is not
2969
* required in this case because TSC_AUX is restored on #VMEXIT
2970
* from the host save area.
2971
*/
2972
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
2973
break;
2974
2975
/*
2976
* TSC_AUX is usually changed only during boot and never read
2977
* directly. Intercept TSC_AUX and switch it via user return.
2978
*/
2979
preempt_disable();
2980
ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
2981
preempt_enable();
2982
if (ret)
2983
break;
2984
2985
svm->tsc_aux = data;
2986
break;
2987
case MSR_IA32_DEBUGCTLMSR:
2988
if (!lbrv) {
2989
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
2990
break;
2991
}
2992
2993
/*
2994
* Suppress BTF as KVM doesn't virtualize BTF, but there's no
2995
* way to communicate lack of support to the guest.
2996
*/
2997
if (data & DEBUGCTLMSR_BTF) {
2998
kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
2999
data &= ~DEBUGCTLMSR_BTF;
3000
}
3001
3002
if (data & DEBUGCTL_RESERVED_BITS)
3003
return 1;
3004
3005
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
3006
svm_update_lbrv(vcpu);
3007
break;
3008
case MSR_VM_HSAVE_PA:
3009
/*
3010
* Old kernels did not validate the value written to
3011
* MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
3012
* value to allow live migrating buggy or malicious guests
3013
* originating from those kernels.
3014
*/
3015
if (!msr->host_initiated && !page_address_valid(vcpu, data))
3016
return 1;
3017
3018
svm->nested.hsave_msr = data & PAGE_MASK;
3019
break;
3020
case MSR_VM_CR:
3021
return svm_set_vm_cr(vcpu, data);
3022
case MSR_VM_IGNNE:
3023
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3024
break;
3025
case MSR_AMD64_DE_CFG: {
3026
u64 supported_de_cfg;
3027
3028
if (svm_get_feature_msr(ecx, &supported_de_cfg))
3029
return 1;
3030
3031
if (data & ~supported_de_cfg)
3032
return 1;
3033
3034
svm->msr_decfg = data;
3035
break;
3036
}
3037
default:
3038
return kvm_set_msr_common(vcpu, msr);
3039
}
3040
return ret;
3041
}
3042
3043
static int msr_interception(struct kvm_vcpu *vcpu)
3044
{
3045
if (to_svm(vcpu)->vmcb->control.exit_info_1)
3046
return kvm_emulate_wrmsr(vcpu);
3047
else
3048
return kvm_emulate_rdmsr(vcpu);
3049
}
3050
3051
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3052
{
3053
kvm_make_request(KVM_REQ_EVENT, vcpu);
3054
svm_clear_vintr(to_svm(vcpu));
3055
3056
/*
3057
* If not running nested, for AVIC, the only reason to end up here is ExtINTs.
3058
* In this case AVIC was temporarily disabled for
3059
* requesting the IRQ window and we have to re-enable it.
3060
*
3061
* If running nested, still remove the VM wide AVIC inhibit to
3062
* support case in which the interrupt window was requested when the
3063
* vCPU was not running nested.
3064
*
3065
* All vCPUs that are still running nested will keep their AVIC
3066
* inhibited due to the per-vCPU AVIC inhibition.
3067
*/
3068
kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3069
3070
++vcpu->stat.irq_window_exits;
3071
return 1;
3072
}
3073
3074
static int pause_interception(struct kvm_vcpu *vcpu)
3075
{
3076
bool in_kernel;
3077
/*
3078
* CPL is not made available for an SEV-ES guest, therefore
3079
* vcpu->arch.preempted_in_kernel can never be true. Just
3080
* set in_kernel to false as well.
3081
*/
3082
in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
3083
3084
grow_ple_window(vcpu);
3085
3086
kvm_vcpu_on_spin(vcpu, in_kernel);
3087
return kvm_skip_emulated_instruction(vcpu);
3088
}
3089
3090
static int invpcid_interception(struct kvm_vcpu *vcpu)
3091
{
3092
struct vcpu_svm *svm = to_svm(vcpu);
3093
unsigned long type;
3094
gva_t gva;
3095
3096
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_INVPCID)) {
3097
kvm_queue_exception(vcpu, UD_VECTOR);
3098
return 1;
3099
}
3100
3101
/*
3102
* For an INVPCID intercept:
3103
* EXITINFO1 provides the linear address of the memory operand.
3104
* EXITINFO2 provides the contents of the register operand.
3105
*/
3106
type = svm->vmcb->control.exit_info_2;
3107
gva = svm->vmcb->control.exit_info_1;
3108
3109
/*
3110
* FIXME: Perform segment checks for 32-bit mode, and inject #SS if the
3111
* stack segment is used. The intercept takes priority over all
3112
* #GP checks except CPL>0, but somehow still generates a linear
3113
* address? The APM is sorely lacking.
3114
*/
3115
if (is_noncanonical_address(gva, vcpu, 0)) {
3116
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3117
return 1;
3118
}
3119
3120
return kvm_handle_invpcid(vcpu, type, gva);
3121
}
3122
3123
static inline int complete_userspace_buslock(struct kvm_vcpu *vcpu)
3124
{
3125
struct vcpu_svm *svm = to_svm(vcpu);
3126
3127
/*
3128
* If userspace has NOT changed RIP, then KVM's ABI is to let the guest
3129
* execute the bus-locking instruction. Set the bus lock counter to '1'
3130
* to effectively step past the bus lock.
3131
*/
3132
if (kvm_is_linear_rip(vcpu, vcpu->arch.cui_linear_rip))
3133
svm->vmcb->control.bus_lock_counter = 1;
3134
3135
return 1;
3136
}
3137
3138
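/*
 * Exit to userspace to report the bus lock.  Stash the linear RIP of the
 * bus-locking instruction so that complete_userspace_buslock() can tell
 * whether userspace skipped the instruction before deciding to let it
 * execute once without re-triggering the intercept.
 */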
static int bus_lock_exit(struct kvm_vcpu *vcpu)
3139
{
3140
struct vcpu_svm *svm = to_svm(vcpu);
3141
3142
vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
3143
vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
3144
3145
vcpu->arch.cui_linear_rip = kvm_get_linear_rip(vcpu);
3146
vcpu->arch.complete_userspace_io = complete_userspace_buslock;
3147
3148
if (is_guest_mode(vcpu))
3149
svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip;
3150
3151
return 0;
3152
}
3153
3154
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3155
[SVM_EXIT_READ_CR0] = cr_interception,
3156
[SVM_EXIT_READ_CR3] = cr_interception,
3157
[SVM_EXIT_READ_CR4] = cr_interception,
3158
[SVM_EXIT_READ_CR8] = cr_interception,
3159
[SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
3160
[SVM_EXIT_WRITE_CR0] = cr_interception,
3161
[SVM_EXIT_WRITE_CR3] = cr_interception,
3162
[SVM_EXIT_WRITE_CR4] = cr_interception,
3163
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3164
[SVM_EXIT_READ_DR0] = dr_interception,
3165
[SVM_EXIT_READ_DR1] = dr_interception,
3166
[SVM_EXIT_READ_DR2] = dr_interception,
3167
[SVM_EXIT_READ_DR3] = dr_interception,
3168
[SVM_EXIT_READ_DR4] = dr_interception,
3169
[SVM_EXIT_READ_DR5] = dr_interception,
3170
[SVM_EXIT_READ_DR6] = dr_interception,
3171
[SVM_EXIT_READ_DR7] = dr_interception,
3172
[SVM_EXIT_WRITE_DR0] = dr_interception,
3173
[SVM_EXIT_WRITE_DR1] = dr_interception,
3174
[SVM_EXIT_WRITE_DR2] = dr_interception,
3175
[SVM_EXIT_WRITE_DR3] = dr_interception,
3176
[SVM_EXIT_WRITE_DR4] = dr_interception,
3177
[SVM_EXIT_WRITE_DR5] = dr_interception,
3178
[SVM_EXIT_WRITE_DR6] = dr_interception,
3179
[SVM_EXIT_WRITE_DR7] = dr_interception,
3180
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3181
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3182
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3183
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3184
[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3185
[SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
3186
[SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
3187
[SVM_EXIT_INTR] = intr_interception,
3188
[SVM_EXIT_NMI] = nmi_interception,
3189
[SVM_EXIT_SMI] = smi_interception,
3190
[SVM_EXIT_VINTR] = interrupt_window_interception,
3191
[SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
3192
[SVM_EXIT_CPUID] = kvm_emulate_cpuid,
3193
[SVM_EXIT_IRET] = iret_interception,
3194
[SVM_EXIT_INVD] = kvm_emulate_invd,
3195
[SVM_EXIT_PAUSE] = pause_interception,
3196
[SVM_EXIT_HLT] = kvm_emulate_halt,
3197
[SVM_EXIT_INVLPG] = invlpg_interception,
3198
[SVM_EXIT_INVLPGA] = invlpga_interception,
3199
[SVM_EXIT_IOIO] = io_interception,
3200
[SVM_EXIT_MSR] = msr_interception,
3201
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3202
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
3203
[SVM_EXIT_VMRUN] = vmrun_interception,
3204
[SVM_EXIT_VMMCALL] = kvm_emulate_hypercall,
3205
[SVM_EXIT_VMLOAD] = vmload_interception,
3206
[SVM_EXIT_VMSAVE] = vmsave_interception,
3207
[SVM_EXIT_STGI] = stgi_interception,
3208
[SVM_EXIT_CLGI] = clgi_interception,
3209
[SVM_EXIT_SKINIT] = skinit_interception,
3210
[SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
3211
[SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
3212
[SVM_EXIT_MONITOR] = kvm_emulate_monitor,
3213
[SVM_EXIT_MWAIT] = kvm_emulate_mwait,
3214
[SVM_EXIT_XSETBV] = kvm_emulate_xsetbv,
3215
[SVM_EXIT_RDPRU] = kvm_handle_invalid_op,
3216
[SVM_EXIT_EFER_WRITE_TRAP] = efer_trap,
3217
[SVM_EXIT_CR0_WRITE_TRAP] = cr_trap,
3218
[SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
3219
[SVM_EXIT_CR8_WRITE_TRAP] = cr_trap,
3220
[SVM_EXIT_INVPCID] = invpcid_interception,
3221
[SVM_EXIT_IDLE_HLT] = kvm_emulate_halt,
3222
[SVM_EXIT_NPF] = npf_interception,
3223
[SVM_EXIT_BUS_LOCK] = bus_lock_exit,
3224
[SVM_EXIT_RSM] = rsm_interception,
3225
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
3226
[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
3227
#ifdef CONFIG_KVM_AMD_SEV
3228
[SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
3229
#endif
3230
};
3231
3232
static void dump_vmcb(struct kvm_vcpu *vcpu)
3233
{
3234
struct vcpu_svm *svm = to_svm(vcpu);
3235
struct vmcb_control_area *control = &svm->vmcb->control;
3236
struct vmcb_save_area *save = &svm->vmcb->save;
3237
struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3238
char *vm_type;
3239
3240
if (!dump_invalid_vmcb) {
3241
pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3242
return;
3243
}
3244
3245
guard(mutex)(&vmcb_dump_mutex);
3246
3247
vm_type = sev_snp_guest(vcpu->kvm) ? "SEV-SNP" :
3248
sev_es_guest(vcpu->kvm) ? "SEV-ES" :
3249
sev_guest(vcpu->kvm) ? "SEV" : "SVM";
3250
3251
pr_err("%s vCPU%u VMCB %p, last attempted VMRUN on CPU %d\n",
3252
vm_type, vcpu->vcpu_id, svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3253
pr_err("VMCB Control Area:\n");
3254
pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3255
pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3256
pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3257
pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3258
pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3259
pr_err("%-20s%08x %08x\n", "intercepts:",
3260
control->intercepts[INTERCEPT_WORD3],
3261
control->intercepts[INTERCEPT_WORD4]);
3262
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3263
pr_err("%-20s%d\n", "pause filter threshold:",
3264
control->pause_filter_thresh);
3265
pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3266
pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3267
pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3268
pr_err("%-20s%d\n", "asid:", control->asid);
3269
pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3270
pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3271
pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3272
pr_err("%-20s%08x\n", "int_state:", control->int_state);
3273
pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3274
pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3275
pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3276
pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3277
pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3278
pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3279
pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3280
pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3281
pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3282
pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3283
pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3284
pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
3285
pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3286
pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3287
pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3288
pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3289
pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3290
pr_err("%-20s%016llx\n", "allowed_sev_features:", control->allowed_sev_features);
3291
pr_err("%-20s%016llx\n", "guest_sev_features:", control->guest_sev_features);
3292
3293
if (sev_es_guest(vcpu->kvm)) {
3294
save = sev_decrypt_vmsa(vcpu);
3295
if (!save)
3296
goto no_vmsa;
3297
3298
save01 = save;
3299
}
3300
3301
pr_err("VMCB State Save Area:\n");
3302
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3303
"es:",
3304
save->es.selector, save->es.attrib,
3305
save->es.limit, save->es.base);
3306
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3307
"cs:",
3308
save->cs.selector, save->cs.attrib,
3309
save->cs.limit, save->cs.base);
3310
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3311
"ss:",
3312
save->ss.selector, save->ss.attrib,
3313
save->ss.limit, save->ss.base);
3314
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3315
"ds:",
3316
save->ds.selector, save->ds.attrib,
3317
save->ds.limit, save->ds.base);
3318
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3319
"fs:",
3320
save01->fs.selector, save01->fs.attrib,
3321
save01->fs.limit, save01->fs.base);
3322
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3323
"gs:",
3324
save01->gs.selector, save01->gs.attrib,
3325
save01->gs.limit, save01->gs.base);
3326
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3327
"gdtr:",
3328
save->gdtr.selector, save->gdtr.attrib,
3329
save->gdtr.limit, save->gdtr.base);
3330
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3331
"ldtr:",
3332
save01->ldtr.selector, save01->ldtr.attrib,
3333
save01->ldtr.limit, save01->ldtr.base);
3334
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3335
"idtr:",
3336
save->idtr.selector, save->idtr.attrib,
3337
save->idtr.limit, save->idtr.base);
3338
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3339
"tr:",
3340
save01->tr.selector, save01->tr.attrib,
3341
save01->tr.limit, save01->tr.base);
3342
pr_err("vmpl: %d cpl: %d efer: %016llx\n",
3343
save->vmpl, save->cpl, save->efer);
3344
pr_err("%-15s %016llx %-13s %016llx\n",
3345
"cr0:", save->cr0, "cr2:", save->cr2);
3346
pr_err("%-15s %016llx %-13s %016llx\n",
3347
"cr3:", save->cr3, "cr4:", save->cr4);
3348
pr_err("%-15s %016llx %-13s %016llx\n",
3349
"dr6:", save->dr6, "dr7:", save->dr7);
3350
pr_err("%-15s %016llx %-13s %016llx\n",
3351
"rip:", save->rip, "rflags:", save->rflags);
3352
pr_err("%-15s %016llx %-13s %016llx\n",
3353
"rsp:", save->rsp, "rax:", save->rax);
3354
pr_err("%-15s %016llx %-13s %016llx\n",
3355
"s_cet:", save->s_cet, "ssp:", save->ssp);
3356
pr_err("%-15s %016llx\n",
3357
"isst_addr:", save->isst_addr);
3358
pr_err("%-15s %016llx %-13s %016llx\n",
3359
"star:", save01->star, "lstar:", save01->lstar);
3360
pr_err("%-15s %016llx %-13s %016llx\n",
3361
"cstar:", save01->cstar, "sfmask:", save01->sfmask);
3362
pr_err("%-15s %016llx %-13s %016llx\n",
3363
"kernel_gs_base:", save01->kernel_gs_base,
3364
"sysenter_cs:", save01->sysenter_cs);
3365
pr_err("%-15s %016llx %-13s %016llx\n",
3366
"sysenter_esp:", save01->sysenter_esp,
3367
"sysenter_eip:", save01->sysenter_eip);
3368
pr_err("%-15s %016llx %-13s %016llx\n",
3369
"gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3370
pr_err("%-15s %016llx %-13s %016llx\n",
3371
"br_from:", save->br_from, "br_to:", save->br_to);
3372
pr_err("%-15s %016llx %-13s %016llx\n",
3373
"excp_from:", save->last_excp_from,
3374
"excp_to:", save->last_excp_to);
3375
3376
if (sev_es_guest(vcpu->kvm)) {
3377
struct sev_es_save_area *vmsa = (struct sev_es_save_area *)save;
3378
3379
pr_err("%-15s %016llx\n",
3380
"sev_features", vmsa->sev_features);
3381
3382
pr_err("%-15s %016llx %-13s %016llx\n",
3383
"pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp);
3384
pr_err("%-15s %016llx %-13s %016llx\n",
3385
"pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp);
3386
pr_err("%-15s %016llx\n",
3387
"u_cet:", vmsa->u_cet);
3388
3389
pr_err("%-15s %016llx %-13s %016llx\n",
3390
"rax:", vmsa->rax, "rbx:", vmsa->rbx);
3391
pr_err("%-15s %016llx %-13s %016llx\n",
3392
"rcx:", vmsa->rcx, "rdx:", vmsa->rdx);
3393
pr_err("%-15s %016llx %-13s %016llx\n",
3394
"rsi:", vmsa->rsi, "rdi:", vmsa->rdi);
3395
pr_err("%-15s %016llx %-13s %016llx\n",
3396
"rbp:", vmsa->rbp, "rsp:", vmsa->rsp);
3397
pr_err("%-15s %016llx %-13s %016llx\n",
3398
"r8:", vmsa->r8, "r9:", vmsa->r9);
3399
pr_err("%-15s %016llx %-13s %016llx\n",
3400
"r10:", vmsa->r10, "r11:", vmsa->r11);
3401
pr_err("%-15s %016llx %-13s %016llx\n",
3402
"r12:", vmsa->r12, "r13:", vmsa->r13);
3403
pr_err("%-15s %016llx %-13s %016llx\n",
3404
"r14:", vmsa->r14, "r15:", vmsa->r15);
3405
pr_err("%-15s %016llx %-13s %016llx\n",
3406
"xcr0:", vmsa->xcr0, "xss:", vmsa->xss);
3407
} else {
3408
pr_err("%-15s %016llx %-13s %016lx\n",
3409
"rax:", save->rax, "rbx:",
3410
vcpu->arch.regs[VCPU_REGS_RBX]);
3411
pr_err("%-15s %016lx %-13s %016lx\n",
3412
"rcx:", vcpu->arch.regs[VCPU_REGS_RCX],
3413
"rdx:", vcpu->arch.regs[VCPU_REGS_RDX]);
3414
pr_err("%-15s %016lx %-13s %016lx\n",
3415
"rsi:", vcpu->arch.regs[VCPU_REGS_RSI],
3416
"rdi:", vcpu->arch.regs[VCPU_REGS_RDI]);
3417
pr_err("%-15s %016lx %-13s %016llx\n",
3418
"rbp:", vcpu->arch.regs[VCPU_REGS_RBP],
3419
"rsp:", save->rsp);
3420
#ifdef CONFIG_X86_64
3421
pr_err("%-15s %016lx %-13s %016lx\n",
3422
"r8:", vcpu->arch.regs[VCPU_REGS_R8],
3423
"r9:", vcpu->arch.regs[VCPU_REGS_R9]);
3424
pr_err("%-15s %016lx %-13s %016lx\n",
3425
"r10:", vcpu->arch.regs[VCPU_REGS_R10],
3426
"r11:", vcpu->arch.regs[VCPU_REGS_R11]);
3427
pr_err("%-15s %016lx %-13s %016lx\n",
3428
"r12:", vcpu->arch.regs[VCPU_REGS_R12],
3429
"r13:", vcpu->arch.regs[VCPU_REGS_R13]);
3430
pr_err("%-15s %016lx %-13s %016lx\n",
3431
"r14:", vcpu->arch.regs[VCPU_REGS_R14],
3432
"r15:", vcpu->arch.regs[VCPU_REGS_R15]);
3433
#endif
3434
}
3435
3436
no_vmsa:
3437
if (sev_es_guest(vcpu->kvm))
3438
sev_free_decrypted_vmsa(vcpu, save);
3439
}
3440
3441
static bool svm_check_exit_valid(u64 exit_code)
3442
{
3443
return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3444
svm_exit_handlers[exit_code]);
3445
}
3446
3447
static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3448
{
3449
vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3450
dump_vmcb(vcpu);
3451
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3452
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3453
vcpu->run->internal.ndata = 2;
3454
vcpu->run->internal.data[0] = exit_code;
3455
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3456
return 0;
3457
}
3458
3459
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
3460
{
3461
if (!svm_check_exit_valid(exit_code))
3462
return svm_handle_invalid_exit(vcpu, exit_code);
3463
3464
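/*
 * With retpolines enabled, an indirect call through svm_exit_handlers[]
 * is relatively expensive, so dispatch the hottest exit reasons directly.
 */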
#ifdef CONFIG_MITIGATION_RETPOLINE
3465
if (exit_code == SVM_EXIT_MSR)
3466
return msr_interception(vcpu);
3467
else if (exit_code == SVM_EXIT_VINTR)
3468
return interrupt_window_interception(vcpu);
3469
else if (exit_code == SVM_EXIT_INTR)
3470
return intr_interception(vcpu);
3471
else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT)
3472
return kvm_emulate_halt(vcpu);
3473
else if (exit_code == SVM_EXIT_NPF)
3474
return npf_interception(vcpu);
3475
#ifdef CONFIG_KVM_AMD_SEV
3476
else if (exit_code == SVM_EXIT_VMGEXIT)
3477
return sev_handle_vmgexit(vcpu);
3478
#endif
3479
#endif
3480
return svm_exit_handlers[exit_code](vcpu);
3481
}
3482
3483
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3484
u64 *info1, u64 *info2,
3485
u32 *intr_info, u32 *error_code)
3486
{
3487
struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3488
3489
*reason = control->exit_code;
3490
*info1 = control->exit_info_1;
3491
*info2 = control->exit_info_2;
3492
*intr_info = control->exit_int_info;
3493
if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3494
(*intr_info & SVM_EXITINTINFO_VALID_ERR))
3495
*error_code = control->exit_int_info_err;
3496
else
3497
*error_code = 0;
3498
}
3499
3500
static void svm_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info,
3501
u32 *error_code)
3502
{
3503
struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3504
3505
*intr_info = control->event_inj;
3506
3507
if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3508
(*intr_info & SVM_EXITINTINFO_VALID_ERR))
3509
*error_code = control->event_inj_err;
3510
else
3511
*error_code = 0;
3512
3513
}
3514
3515
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3516
{
3517
struct vcpu_svm *svm = to_svm(vcpu);
3518
struct kvm_run *kvm_run = vcpu->run;
3519
u32 exit_code = svm->vmcb->control.exit_code;
3520
3521
/* SEV-ES guests must use the CR write traps to track CR registers. */
3522
if (!sev_es_guest(vcpu->kvm)) {
3523
if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3524
vcpu->arch.cr0 = svm->vmcb->save.cr0;
3525
if (npt_enabled)
3526
vcpu->arch.cr3 = svm->vmcb->save.cr3;
3527
}
3528
3529
if (is_guest_mode(vcpu)) {
3530
int vmexit;
3531
3532
trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3533
3534
vmexit = nested_svm_exit_special(svm);
3535
3536
if (vmexit == NESTED_EXIT_CONTINUE)
3537
vmexit = nested_svm_exit_handled(svm);
3538
3539
if (vmexit == NESTED_EXIT_DONE)
3540
return 1;
3541
}
3542
3543
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3544
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3545
kvm_run->fail_entry.hardware_entry_failure_reason
3546
= svm->vmcb->control.exit_code;
3547
kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3548
dump_vmcb(vcpu);
3549
return 0;
3550
}
3551
3552
if (exit_fastpath != EXIT_FASTPATH_NONE)
3553
return 1;
3554
3555
return svm_invoke_exit_handler(vcpu, exit_code);
3556
}
3557
3558
static int pre_svm_run(struct kvm_vcpu *vcpu)
3559
{
3560
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3561
struct vcpu_svm *svm = to_svm(vcpu);
3562
3563
/*
3564
* If the previous vmrun of the vmcb occurred on a different physical
3565
* cpu, then mark the vmcb dirty and assign a new asid. Hardware's
3566
* vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3567
*/
3568
if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3569
svm->current_vmcb->asid_generation = 0;
3570
vmcb_mark_all_dirty(svm->vmcb);
3571
svm->current_vmcb->cpu = vcpu->cpu;
3572
}
3573
3574
if (sev_guest(vcpu->kvm))
3575
return pre_sev_run(svm, vcpu->cpu);
3576
3577
/* FIXME: handle wraparound of asid_generation */
3578
if (svm->current_vmcb->asid_generation != sd->asid_generation)
3579
new_asid(svm, sd);
3580
3581
return 0;
3582
}
3583
3584
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3585
{
3586
struct vcpu_svm *svm = to_svm(vcpu);
3587
3588
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3589
3590
if (svm->nmi_l1_to_l2)
3591
return;
3592
3593
/*
3594
* No need to manually track NMI masking when vNMI is enabled, hardware
3595
* automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
3596
* case where software directly injects an NMI.
3597
*/
3598
if (!is_vnmi_enabled(svm)) {
3599
svm->nmi_masked = true;
3600
svm_set_iret_intercept(svm);
3601
}
3602
++vcpu->stat.nmi_injections;
3603
}
3604
3605
static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3606
{
3607
struct vcpu_svm *svm = to_svm(vcpu);
3608
3609
if (!is_vnmi_enabled(svm))
3610
return false;
3611
3612
return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3613
}
3614
3615
static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3616
{
3617
struct vcpu_svm *svm = to_svm(vcpu);
3618
3619
if (!is_vnmi_enabled(svm))
3620
return false;
3621
3622
if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3623
return false;
3624
3625
svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3626
vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3627
3628
/*
3629
* Because the pending NMI is serviced by hardware, KVM can't know when
3630
* the NMI is "injected", but for all intents and purposes, passing the
3631
* NMI off to hardware counts as injection.
3632
*/
3633
++vcpu->stat.nmi_injections;
3634
3635
return true;
3636
}
3637
3638
static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3639
{
3640
struct vcpu_svm *svm = to_svm(vcpu);
3641
u32 type;
3642
3643
if (vcpu->arch.interrupt.soft) {
3644
if (svm_update_soft_interrupt_rip(vcpu))
3645
return;
3646
3647
type = SVM_EVTINJ_TYPE_SOFT;
3648
} else {
3649
type = SVM_EVTINJ_TYPE_INTR;
3650
}
3651
3652
trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
3653
vcpu->arch.interrupt.soft, reinjected);
3654
++vcpu->stat.irq_injections;
3655
3656
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3657
SVM_EVTINJ_VALID | type;
3658
}
3659
3660
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3661
int trig_mode, int vector)
3662
{
3663
/*
3664
* apic->apicv_active must be read after vcpu->mode.
3665
* Pairs with smp_store_release in vcpu_enter_guest.
3666
*/
3667
bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3668
3669
/* Note, this is called iff the local APIC is in-kernel. */
3670
if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3671
/* Process the interrupt via kvm_check_and_inject_events(). */
3672
kvm_make_request(KVM_REQ_EVENT, vcpu);
3673
kvm_vcpu_kick(vcpu);
3674
return;
3675
}
3676
3677
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3678
if (in_guest_mode) {
3679
/*
3680
* Signal the doorbell to tell hardware to inject the IRQ. If
3681
* the vCPU exits the guest before the doorbell chimes, hardware
3682
* will automatically process AVIC interrupts at the next VMRUN.
3683
*/
3684
avic_ring_doorbell(vcpu);
3685
} else {
3686
/*
3687
* Wake the vCPU if it was blocking. KVM will then detect the
3688
* pending IRQ when checking if the vCPU has a wake event.
3689
*/
3690
kvm_vcpu_wake_up(vcpu);
3691
}
3692
}
3693
3694
static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
3695
int trig_mode, int vector)
3696
{
3697
kvm_lapic_set_irr(vector, apic);
3698
3699
/*
3700
* Pairs with the smp_mb_*() after setting vcpu->guest_mode in
3701
* vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3702
* the read of guest_mode. This guarantees that either VMRUN will see
3703
* and process the new vIRR entry, or that svm_complete_interrupt_delivery
3704
* will signal the doorbell if the CPU has already entered the guest.
3705
*/
3706
smp_mb__after_atomic();
3707
svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3708
}
3709
3710
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3711
{
3712
struct vcpu_svm *svm = to_svm(vcpu);
3713
3714
/*
3715
* SEV-ES guests must always keep the CR intercepts cleared. CR
3716
* tracking is done using the CR write traps.
3717
*/
3718
if (sev_es_guest(vcpu->kvm))
3719
return;
3720
3721
if (nested_svm_virtualize_tpr(vcpu))
3722
return;
3723
3724
svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3725
3726
if (irr == -1)
3727
return;
3728
3729
if (tpr >= irr)
3730
svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3731
}
3732
3733
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3734
{
3735
struct vcpu_svm *svm = to_svm(vcpu);
3736
3737
if (is_vnmi_enabled(svm))
3738
return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3739
else
3740
return svm->nmi_masked;
3741
}
3742
3743
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3744
{
3745
struct vcpu_svm *svm = to_svm(vcpu);
3746
3747
if (is_vnmi_enabled(svm)) {
3748
if (masked)
3749
svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3750
else
3751
svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3752
3753
} else {
3754
svm->nmi_masked = masked;
3755
if (masked)
3756
svm_set_iret_intercept(svm);
3757
else
3758
svm_clr_iret_intercept(svm);
3759
}
3760
}
3761
3762
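/*
 * NMIs are blocked if GIF=0, if NMIs are masked (by vNMI hardware or KVM's
 * IRET tracking), or if the vCPU is in an interrupt shadow.  While L2 is
 * active, an NMI that L1 intercepts is never blocked, as it simply triggers
 * a nested #VMEXIT.
 */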
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3763
{
3764
struct vcpu_svm *svm = to_svm(vcpu);
3765
struct vmcb *vmcb = svm->vmcb;
3766
3767
if (!gif_set(svm))
3768
return true;
3769
3770
if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3771
return false;
3772
3773
if (svm_get_nmi_mask(vcpu))
3774
return true;
3775
3776
return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3777
}
3778
3779
static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3780
{
3781
struct vcpu_svm *svm = to_svm(vcpu);
3782
if (svm->nested.nested_run_pending)
3783
return -EBUSY;
3784
3785
if (svm_nmi_blocked(vcpu))
3786
return 0;
3787
3788
/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
3789
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3790
return -EBUSY;
3791
return 1;
3792
}
3793
3794
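/*
 * IRQs are blocked if GIF=0 or if the relevant RFLAGS.IF is clear (L1's
 * RFLAGS.IF when V_INTR_MASKING is in use for L2), or if the vCPU is in an
 * interrupt shadow.  While L2 is active, an IRQ that L1 intercepts is not
 * additionally blocked by the interrupt shadow.
 */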
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3795
{
3796
struct vcpu_svm *svm = to_svm(vcpu);
3797
struct vmcb *vmcb = svm->vmcb;
3798
3799
if (!gif_set(svm))
3800
return true;
3801
3802
if (is_guest_mode(vcpu)) {
3803
/* As long as interrupts are being delivered... */
3804
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3805
? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3806
: !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3807
return true;
3808
3809
/* ... vmexits aren't blocked by the interrupt shadow */
3810
if (nested_exit_on_intr(svm))
3811
return false;
3812
} else {
3813
if (!svm_get_if_flag(vcpu))
3814
return true;
3815
}
3816
3817
return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3818
}
3819
3820
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3821
{
3822
struct vcpu_svm *svm = to_svm(vcpu);
3823
3824
if (svm->nested.nested_run_pending)
3825
return -EBUSY;
3826
3827
if (svm_interrupt_blocked(vcpu))
3828
return 0;
3829
3830
/*
3831
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3832
* e.g. if the IRQ arrived asynchronously after checking nested events.
3833
*/
3834
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3835
return -EBUSY;
3836
3837
return 1;
3838
}
3839
3840
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
3841
{
3842
struct vcpu_svm *svm = to_svm(vcpu);
3843
3844
/*
3845
* In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3846
* 1, because that's a separate STGI/VMRUN intercept. The next time we
3847
* get that intercept, this function will be called again though and
3848
* we'll get the vintr intercept. However, if the vGIF feature is
3849
* enabled, the STGI interception will not occur. Enable the irq
3850
* window under the assumption that the hardware will set the GIF.
3851
*/
3852
if (vgif || gif_set(svm)) {
3853
/*
3854
* IRQ window is not needed when AVIC is enabled,
3855
* unless we have pending ExtINT since it cannot be injected
3856
* via AVIC. In such case, KVM needs to temporarily disable AVIC,
3857
* and fallback to injecting IRQ via V_IRQ.
3858
*
3859
* If running nested, AVIC is already locally inhibited
3860
* on this vCPU, therefore there is no need to request
3861
* the VM wide AVIC inhibition.
3862
*/
3863
if (!is_guest_mode(vcpu))
3864
kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3865
3866
svm_set_vintr(svm);
3867
}
3868
}
3869
3870
static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
3871
{
3872
struct vcpu_svm *svm = to_svm(vcpu);
3873
3874
/*
3875
* If NMIs are outright masked, i.e. the vCPU is already handling an
3876
* NMI, and KVM has not yet intercepted an IRET, then there is nothing
3877
* more to do at this time as KVM has already enabled IRET intercepts.
3878
* If KVM has already intercepted IRET, then single-step over the IRET,
3879
* as NMIs aren't architecturally unmasked until the IRET completes.
3880
*
3881
* If vNMI is enabled, KVM should never request an NMI window if NMIs
3882
* are masked, as KVM allows at most one to-be-injected NMI and one
3883
* pending NMI. If two NMIs arrive simultaneously, KVM will inject one
3884
* NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
3885
* unmasked. KVM _will_ request an NMI window in some situations, e.g.
3886
* if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
3887
* inject the NMI. In those situations, KVM needs to single-step over
3888
* the STI shadow or intercept STGI.
3889
*/
3890
if (svm_get_nmi_mask(vcpu)) {
3891
WARN_ON_ONCE(is_vnmi_enabled(svm));
3892
3893
if (!svm->awaiting_iret_completion)
3894
return; /* IRET will cause a vm exit */
3895
}
3896
3897
/*
3898
* SEV-ES guests are responsible for signaling when a vCPU is ready to
3899
* receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
3900
* KVM can't intercept and single-step IRET to detect when NMIs are
3901
* unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE.
3902
*
3903
* Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
3904
* ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
3905
* supported NAEs in the GHCB protocol.
3906
*/
3907
if (sev_es_guest(vcpu->kvm))
3908
return;
3909
3910
if (!gif_set(svm)) {
3911
if (vgif)
3912
svm_set_intercept(svm, INTERCEPT_STGI);
3913
return; /* STGI will cause a vm exit */
3914
}
3915
3916
/*
3917
* Something is preventing the NMI from being injected.  Single-step over
3918
* the possible culprit (IRET, exception injection, or interrupt shadow).
3919
*/
3920
svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3921
svm->nmi_singlestep = true;
3922
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3923
}
3924
3925
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
3926
{
3927
struct vcpu_svm *svm = to_svm(vcpu);
3928
3929
/*
3930
* Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
3931
* A TLB flush for the current ASID flushes both "host" and "guest" TLB
3932
* entries, and thus is a superset of Hyper-V's fine grained flushing.
3933
*/
3934
kvm_hv_vcpu_purge_flush_tlb(vcpu);
3935
3936
/*
3937
* Flush only the current ASID even if the TLB flush was invoked via
3938
* kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
3939
* ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3940
* unconditionally does a TLB flush on both nested VM-Enter and nested
3941
* VM-Exit (via kvm_mmu_reset_context()).
3942
*/
3943
if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3944
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3945
else
3946
svm->current_vmcb->asid_generation--;
3947
}
3948
3949
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
3950
{
3951
hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
3952
3953
/*
3954
* When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
3955
* flush the NPT mappings via hypercall as flushing the ASID only
3956
* affects virtual to physical mappings, it does not invalidate guest
3957
* physical to host physical mappings.
3958
*/
3959
if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
3960
hyperv_flush_guest_mapping(root_tdp);
3961
3962
svm_flush_tlb_asid(vcpu);
3963
}
3964
3965
static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
3966
{
3967
/*
3968
* When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
3969
* flushes should be routed to hv_flush_remote_tlbs() without requesting
3970
* a "regular" remote flush. Reaching this point means either there's
3971
* a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
3972
* which might be fatal to the guest. Yell, but try to recover.
3973
*/
3974
if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
3975
hv_flush_remote_tlbs(vcpu->kvm);
3976
3977
svm_flush_tlb_asid(vcpu);
3978
}
3979
3980
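/* Flush the GVA's TLB entries for the VMCB's current ASID via INVLPGA. */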
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3981
{
3982
struct vcpu_svm *svm = to_svm(vcpu);
3983
3984
invlpga(gva, svm->vmcb->control.asid);
3985
}
3986
3987
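/*
 * Propagate the guest's V_TPR back into the local APIC's TPR (CR8) after
 * VM-Exit, but only if CR8 writes aren't intercepted, i.e. only if the guest
 * could have changed the TPR without KVM's knowledge.
 */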
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3988
{
3989
struct vcpu_svm *svm = to_svm(vcpu);
3990
3991
if (nested_svm_virtualize_tpr(vcpu))
3992
return;
3993
3994
if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
3995
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3996
kvm_set_cr8(vcpu, cr8);
3997
}
3998
}
3999
4000
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
4001
{
4002
struct vcpu_svm *svm = to_svm(vcpu);
4003
u64 cr8;
4004
4005
if (nested_svm_virtualize_tpr(vcpu))
4006
return;
4007
4008
cr8 = kvm_get_cr8(vcpu);
4009
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
4010
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
4011
}
4012
4013
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
4014
int type)
4015
{
4016
bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
4017
bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
4018
struct vcpu_svm *svm = to_svm(vcpu);
4019
4020
/*
4021
* If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
4022
* associated with the original soft exception/interrupt. next_rip is
4023
* cleared on all exits that can occur while vectoring an event, so KVM
4024
* needs to manually set next_rip for re-injection. Unlike the !nrips
4025
* case below, this needs to be done if and only if KVM is re-injecting
4026
* the same event, i.e. if the event is a soft exception/interrupt,
4027
* otherwise next_rip is unused on VMRUN.
4028
*/
4029
if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
4030
kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
4031
svm->vmcb->control.next_rip = svm->soft_int_next_rip;
4032
/*
4033
* If NRIPS isn't enabled, KVM must manually advance RIP prior to
4034
* injecting the soft exception/interrupt. That advancement needs to
4035
* be unwound if vectoring didn't complete. Note, the new event may
4036
* not be the injected event, e.g. if KVM injected an INTn, the INTn
4037
* hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
4038
* be the reported vectored event, but RIP still needs to be unwound.
4039
*/
4040
else if (!nrips && (is_soft || is_exception) &&
4041
kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
4042
kvm_rip_write(vcpu, svm->soft_int_old_rip);
4043
}
4044
4045
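/*
 * Process EXITINTINFO after a VM-Exit: if an event was being vectored when
 * the exit occurred, re-queue it (NMI, exception, or interrupt) so that it
 * is re-injected on the next VMRUN.  This also detects IRET completion for
 * the legacy (non-vNMI) NMI-masking emulation.
 */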
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
4046
{
4047
struct vcpu_svm *svm = to_svm(vcpu);
4048
u8 vector;
4049
int type;
4050
u32 exitintinfo = svm->vmcb->control.exit_int_info;
4051
bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
4052
bool soft_int_injected = svm->soft_int_injected;
4053
4054
svm->nmi_l1_to_l2 = false;
4055
svm->soft_int_injected = false;
4056
4057
/*
4058
* If we've made progress since setting awaiting_iret_completion, we've
4059
* executed an IRET and can allow NMI injection.
4060
*/
4061
if (svm->awaiting_iret_completion &&
4062
kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
4063
svm->awaiting_iret_completion = false;
4064
svm->nmi_masked = false;
4065
kvm_make_request(KVM_REQ_EVENT, vcpu);
4066
}
4067
4068
vcpu->arch.nmi_injected = false;
4069
kvm_clear_exception_queue(vcpu);
4070
kvm_clear_interrupt_queue(vcpu);
4071
4072
if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4073
return;
4074
4075
kvm_make_request(KVM_REQ_EVENT, vcpu);
4076
4077
vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4078
type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4079
4080
if (soft_int_injected)
4081
svm_complete_soft_interrupt(vcpu, vector, type);
4082
4083
switch (type) {
4084
case SVM_EXITINTINFO_TYPE_NMI:
4085
vcpu->arch.nmi_injected = true;
4086
svm->nmi_l1_to_l2 = nmi_l1_to_l2;
4087
break;
4088
case SVM_EXITINTINFO_TYPE_EXEPT: {
4089
u32 error_code = 0;
4090
4091
/*
4092
* Never re-inject a #VC exception.
4093
*/
4094
if (vector == X86_TRAP_VC)
4095
break;
4096
4097
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR)
4098
error_code = svm->vmcb->control.exit_int_info_err;
4099
4100
kvm_requeue_exception(vcpu, vector,
4101
exitintinfo & SVM_EXITINTINFO_VALID_ERR,
4102
error_code);
4103
break;
4104
}
4105
case SVM_EXITINTINFO_TYPE_INTR:
4106
kvm_queue_interrupt(vcpu, vector, false);
4107
break;
4108
case SVM_EXITINTINFO_TYPE_SOFT:
4109
kvm_queue_interrupt(vcpu, vector, true);
4110
break;
4111
default:
4112
break;
4113
}
4114
4115
}
4116
4117
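/*
 * Cancel an event that was set up for injection but never delivered, e.g.
 * because entry to the guest was aborted: move EVENTINJ into EXITINTINFO so
 * that svm_complete_interrupts() re-queues the event as pending.
 */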
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4118
{
4119
struct vcpu_svm *svm = to_svm(vcpu);
4120
struct vmcb_control_area *control = &svm->vmcb->control;
4121
4122
control->exit_int_info = control->event_inj;
4123
control->exit_int_info_err = control->event_inj_err;
4124
control->event_inj = 0;
4125
svm_complete_interrupts(vcpu);
4126
}
4127
4128
static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
4129
{
4130
if (to_kvm_sev_info(vcpu->kvm)->need_init)
4131
return -EINVAL;
4132
4133
return 1;
4134
}
4135
4136
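/*
 * Fastpath exit handling, called with IRQs disabled: service WRMSR, HLT and
 * INVD exits for L1 without a full trip through the exit handlers, but only
 * if hardware provided the next RIP, as decoding the instruction could fault
 * and thus sleep.
 */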
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
4137
{
4138
struct vcpu_svm *svm = to_svm(vcpu);
4139
struct vmcb_control_area *control = &svm->vmcb->control;
4140
4141
/*
4142
* Next RIP must be provided as IRQs are disabled, and accessing guest
4143
* memory to decode the instruction might fault, i.e. might sleep.
4144
*/
4145
if (!nrips || !control->next_rip)
4146
return EXIT_FASTPATH_NONE;
4147
4148
if (is_guest_mode(vcpu))
4149
return EXIT_FASTPATH_NONE;
4150
4151
switch (control->exit_code) {
4152
case SVM_EXIT_MSR:
4153
if (!control->exit_info_1)
4154
break;
4155
return handle_fastpath_wrmsr(vcpu);
4156
case SVM_EXIT_HLT:
4157
return handle_fastpath_hlt(vcpu);
4158
case SVM_EXIT_INVD:
4159
return handle_fastpath_invd(vcpu);
4160
default:
4161
break;
4162
}
4163
4164
return EXIT_FASTPATH_NONE;
4165
}
4166
4167
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
4168
{
4169
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
4170
struct vcpu_svm *svm = to_svm(vcpu);
4171
4172
guest_state_enter_irqoff();
4173
4174
/*
4175
* Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
4176
* VMRUN controls whether or not physical IRQs are masked (KVM always
4177
* runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
4178
* temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
4179
* into guest state if delivery of an event during VMRUN triggers a
4180
* #VMEXIT, and the guest_state transitions already tell lockdep that
4181
* IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
4182
* this path, so IRQs aren't actually unmasked while running host code.
4183
*/
4184
raw_local_irq_enable();
4185
4186
amd_clear_divider();
4187
4188
if (sev_es_guest(vcpu->kvm))
4189
__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
4190
sev_es_host_save_area(sd));
4191
else
4192
__svm_vcpu_run(svm, spec_ctrl_intercepted);
4193
4194
raw_local_irq_disable();
4195
4196
guest_state_exit_irqoff();
4197
}
4198
4199
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
4200
{
4201
bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT;
4202
struct vcpu_svm *svm = to_svm(vcpu);
4203
bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
4204
4205
trace_kvm_entry(vcpu, force_immediate_exit);
4206
4207
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4208
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4209
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4210
4211
/*
4212
* Disable singlestep if we're injecting an interrupt/exception.
4213
* We don't want our modified rflags to be pushed on the stack where
4214
* we might not be able to easily reset them if we disabled NMI
4215
* singlestep later.
4216
*/
4217
if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4218
/*
4219
* Event injection happens before external interrupts cause a
4220
* vmexit and interrupts are disabled here, so smp_send_reschedule
4221
* is enough to force an immediate vmexit.
4222
*/
4223
disable_nmi_singlestep(svm);
4224
force_immediate_exit = true;
4225
}
4226
4227
if (force_immediate_exit)
4228
smp_send_reschedule(vcpu->cpu);
4229
4230
if (pre_svm_run(vcpu)) {
4231
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4232
vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR;
4233
vcpu->run->fail_entry.cpu = vcpu->cpu;
4234
return EXIT_FASTPATH_EXIT_USERSPACE;
4235
}
4236
4237
sync_lapic_to_cr8(vcpu);
4238
4239
if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4240
svm->vmcb->control.asid = svm->asid;
4241
vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4242
}
4243
svm->vmcb->save.cr2 = vcpu->arch.cr2;
4244
4245
svm_hv_update_vp_id(svm->vmcb, vcpu);
4246
4247
/*
4248
* Run with all-zero DR6 unless the guest can write DR6 freely, so that
4249
* KVM can get the exact cause of a #DB. Note, loading guest DR6 from
4250
* KVM's snapshot is only necessary when DR accesses won't exit.
4251
*/
4252
if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
4253
svm_set_dr6(vcpu, vcpu->arch.dr6);
4254
else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
4255
svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
4256
4257
clgi();
4258
kvm_load_guest_xsave_state(vcpu);
4259
4260
/*
4261
* Hardware only context switches DEBUGCTL if LBR virtualization is
4262
* enabled. Manually load DEBUGCTL if necessary (and restore it after
4263
* VM-Exit), as running with the host's DEBUGCTL can negatively affect
4264
* guest state and can even be fatal, e.g. due to Bus Lock Detect.
4265
*/
4266
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
4267
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4268
update_debugctlmsr(svm->vmcb->save.dbgctl);
4269
4270
kvm_wait_lapic_expire(vcpu);
4271
4272
/*
4273
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
4274
* it's non-zero. Since vmentry is serialising on affected CPUs, there
4275
* is no need to worry about the conditional branch over the wrmsr
4276
* being speculatively taken.
4277
*/
4278
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4279
x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
4280
4281
svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
4282
4283
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4284
x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
4285
4286
if (!sev_es_guest(vcpu->kvm)) {
4287
vcpu->arch.cr2 = svm->vmcb->save.cr2;
4288
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4289
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4290
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4291
}
4292
vcpu->arch.regs_dirty = 0;
4293
4294
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4295
kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4296
4297
if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
4298
vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
4299
update_debugctlmsr(vcpu->arch.host_debugctl);
4300
4301
kvm_load_host_xsave_state(vcpu);
4302
stgi();
4303
4304
/* Any pending NMI will happen here */
4305
4306
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4307
kvm_after_interrupt(vcpu);
4308
4309
sync_cr8_to_lapic(vcpu);
4310
4311
svm->next_rip = 0;
4312
if (is_guest_mode(vcpu)) {
4313
nested_sync_control_from_vmcb02(svm);
4314
4315
/* Track VMRUNs that have made it past consistency checking */
4316
if (svm->nested.nested_run_pending &&
4317
svm->vmcb->control.exit_code != SVM_EXIT_ERR)
4318
++vcpu->stat.nested_run;
4319
4320
svm->nested.nested_run_pending = 0;
4321
}
4322
4323
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4324
vmcb_mark_all_clean(svm->vmcb);
4325
4326
/* if exit due to PF check for async PF */
4327
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4328
vcpu->arch.apf.host_apf_flags =
4329
kvm_read_and_reset_apf_flags();
4330
4331
vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4332
4333
/*
4334
* We need to handle MC intercepts here before the vcpu has a chance to
4335
* change the physical cpu
4336
*/
4337
if (unlikely(svm->vmcb->control.exit_code ==
4338
SVM_EXIT_EXCP_BASE + MC_VECTOR))
4339
svm_handle_mce(vcpu);
4340
4341
trace_kvm_exit(vcpu, KVM_ISA_SVM);
4342
4343
svm_complete_interrupts(vcpu);
4344
4345
return svm_exit_handlers_fastpath(vcpu);
4346
}
4347
4348
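/*
 * Load the new MMU root.  With NPT, the root goes into nCR3 and the guest's
 * own CR3 is written to the VMCB; without NPT, the root itself becomes the
 * shadow-paging CR3.
 */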
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4349
int root_level)
4350
{
4351
struct vcpu_svm *svm = to_svm(vcpu);
4352
unsigned long cr3;
4353
4354
if (npt_enabled) {
4355
svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4356
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4357
4358
hv_track_root_tdp(vcpu, root_hpa);
4359
4360
cr3 = vcpu->arch.cr3;
4361
} else if (root_level >= PT64_ROOT_4LEVEL) {
4362
cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4363
} else {
4364
/* PCID in the guest should be impossible with a 32-bit MMU. */
4365
WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4366
cr3 = root_hpa;
4367
}
4368
4369
svm->vmcb->save.cr3 = cr3;
4370
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4371
}
4372
4373
static void
4374
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4375
{
4376
/*
4377
* Patch in the VMMCALL instruction:
4378
*/
4379
hypercall[0] = 0x0f;
4380
hypercall[1] = 0x01;
4381
hypercall[2] = 0xd9;
4382
}
4383
4384
/*
4385
* The kvm parameter can be NULL (module initialization, or invocation before
4386
* VM creation). Be sure to check the kvm parameter before using it.
4387
*/
4388
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4389
{
4390
switch (index) {
4391
case MSR_IA32_MCG_EXT_CTL:
4392
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
4393
return false;
4394
case MSR_IA32_SMBASE:
4395
if (!IS_ENABLED(CONFIG_KVM_SMM))
4396
return false;
4397
/* SEV-ES guests do not support SMM, so report false */
4398
if (kvm && sev_es_guest(kvm))
4399
return false;
4400
break;
4401
default:
4402
break;
4403
}
4404
4405
return true;
4406
}
4407
4408
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4409
{
4410
struct vcpu_svm *svm = to_svm(vcpu);
4411
4412
/*
4413
* SVM doesn't provide a way to disable just XSAVES in the guest, KVM
4414
* can only disable all XSAVE variants by disallowing CR4.OSXSAVE from
4415
* being set. As a result, if the host has XSAVE and XSAVES, and the
4416
* guest has XSAVE enabled, the guest can execute XSAVES without
4417
* faulting. Treat XSAVES as enabled in this case regardless of
4418
* whether it's advertised to the guest so that KVM context switches
4419
* XSS on VM-Enter/VM-Exit. Failure to do so would effectively give
4420
* the guest read/write access to the host's XSS.
4421
*/
4422
guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES,
4423
boot_cpu_has(X86_FEATURE_XSAVES) &&
4424
guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVE));
4425
4426
/*
4427
* Intercept VMLOAD if the vCPU model is Intel in order to emulate that
4428
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
4429
* SVM on Intel is bonkers and extremely unlikely to work).
4430
*/
4431
if (guest_cpuid_is_intel_compatible(vcpu))
4432
guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4433
4434
if (sev_guest(vcpu->kvm))
4435
sev_vcpu_after_set_cpuid(svm);
4436
}
4437
4438
static bool svm_has_wbinvd_exit(void)
4439
{
4440
return true;
4441
}
4442
4443
#define PRE_EX(exit) { .exit_code = (exit), \
4444
.stage = X86_ICPT_PRE_EXCEPT, }
4445
#define POST_EX(exit) { .exit_code = (exit), \
4446
.stage = X86_ICPT_POST_EXCEPT, }
4447
#define POST_MEM(exit) { .exit_code = (exit), \
4448
.stage = X86_ICPT_POST_MEMACCESS, }
4449
4450
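/*
 * Map the emulator's x86_intercept codes to the equivalent SVM exit codes,
 * along with the emulation stage at which the nested intercept check must
 * be performed.
 */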
static const struct __x86_intercept {
4451
u32 exit_code;
4452
enum x86_intercept_stage stage;
4453
} x86_intercept_map[] = {
4454
[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4455
[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4456
[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4457
[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4458
[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
4459
[x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4460
[x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
4461
[x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4462
[x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4463
[x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4464
[x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4465
[x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4466
[x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4467
[x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4468
[x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
4469
[x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4470
[x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4471
[x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4472
[x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4473
[x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4474
[x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4475
[x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4476
[x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
4477
[x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4478
[x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4479
[x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4480
[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4481
[x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4482
[x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4483
[x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4484
[x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4485
[x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4486
[x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4487
[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4488
[x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
4489
[x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4490
[x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4491
[x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4492
[x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4493
[x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4494
[x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4495
[x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
4496
[x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4497
[x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4498
[x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4499
[x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
4500
[x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
4501
};
4502
4503
#undef PRE_EX
4504
#undef POST_EX
4505
#undef POST_MEM
4506
4507
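/*
 * Check whether an instruction being emulated on behalf of L2 is intercepted
 * by L1: synthesize the SVM exit code and exit info that hardware would have
 * generated, then let the nested exit handler decide whether L1 gets a
 * #VMEXIT.
 */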
static int svm_check_intercept(struct kvm_vcpu *vcpu,
4508
struct x86_instruction_info *info,
4509
enum x86_intercept_stage stage,
4510
struct x86_exception *exception)
4511
{
4512
struct vcpu_svm *svm = to_svm(vcpu);
4513
int vmexit, ret = X86EMUL_CONTINUE;
4514
struct __x86_intercept icpt_info;
4515
struct vmcb *vmcb = svm->vmcb;
4516
4517
if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4518
goto out;
4519
4520
icpt_info = x86_intercept_map[info->intercept];
4521
4522
if (stage != icpt_info.stage)
4523
goto out;
4524
4525
switch (icpt_info.exit_code) {
4526
case SVM_EXIT_READ_CR0:
4527
if (info->intercept == x86_intercept_cr_read)
4528
icpt_info.exit_code += info->modrm_reg;
4529
break;
4530
case SVM_EXIT_WRITE_CR0: {
4531
unsigned long cr0, val;
4532
4533
if (info->intercept == x86_intercept_cr_write)
4534
icpt_info.exit_code += info->modrm_reg;
4535
4536
if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4537
info->intercept == x86_intercept_clts)
4538
break;
4539
4540
if (!(vmcb12_is_intercept(&svm->nested.ctl,
4541
INTERCEPT_SELECTIVE_CR0)))
4542
break;
4543
4544
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4545
val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4546
4547
if (info->intercept == x86_intercept_lmsw) {
4548
cr0 &= 0xfUL;
4549
val &= 0xfUL;
4550
/* lmsw can't clear PE - catch this here */
4551
if (cr0 & X86_CR0_PE)
4552
val |= X86_CR0_PE;
4553
}
4554
4555
if (cr0 ^ val)
4556
icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4557
4558
break;
4559
}
4560
case SVM_EXIT_READ_DR0:
4561
case SVM_EXIT_WRITE_DR0:
4562
icpt_info.exit_code += info->modrm_reg;
4563
break;
4564
case SVM_EXIT_MSR:
4565
if (info->intercept == x86_intercept_wrmsr)
4566
vmcb->control.exit_info_1 = 1;
4567
else
4568
vmcb->control.exit_info_1 = 0;
4569
break;
4570
case SVM_EXIT_PAUSE:
4571
/*
4572
* This intercept is reached for the NOP opcode, but PAUSE is REP NOP,
4573
* so only treat it as PAUSE if a REP prefix is present.
4574
*/
4575
if (info->rep_prefix != REPE_PREFIX)
4576
goto out;
4577
break;
4578
case SVM_EXIT_IOIO: {
4579
u64 exit_info;
4580
u32 bytes;
4581
4582
if (info->intercept == x86_intercept_in ||
4583
info->intercept == x86_intercept_ins) {
4584
exit_info = ((info->src_val & 0xffff) << 16) |
4585
SVM_IOIO_TYPE_MASK;
4586
bytes = info->dst_bytes;
4587
} else {
4588
exit_info = (info->dst_val & 0xffff) << 16;
4589
bytes = info->src_bytes;
4590
}
4591
4592
if (info->intercept == x86_intercept_outs ||
4593
info->intercept == x86_intercept_ins)
4594
exit_info |= SVM_IOIO_STR_MASK;
4595
4596
if (info->rep_prefix)
4597
exit_info |= SVM_IOIO_REP_MASK;
4598
4599
bytes = min(bytes, 4u);
4600
4601
exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4602
4603
exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4604
4605
vmcb->control.exit_info_1 = exit_info;
4606
vmcb->control.exit_info_2 = info->next_rip;
4607
4608
break;
4609
}
4610
default:
4611
break;
4612
}
4613
4614
/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4615
if (static_cpu_has(X86_FEATURE_NRIPS))
4616
vmcb->control.next_rip = info->next_rip;
4617
vmcb->control.exit_code = icpt_info.exit_code;
4618
vmexit = nested_svm_exit_handled(svm);
4619
4620
ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4621
: X86EMUL_CONTINUE;
4622
4623
out:
4624
return ret;
4625
}
4626
4627
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4628
{
4629
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4630
vcpu->arch.at_instruction_boundary = true;
4631
}
4632
4633
static void svm_setup_mce(struct kvm_vcpu *vcpu)
4634
{
4635
/* [63:9] are reserved. */
4636
vcpu->arch.mcg_cap &= 0x1ff;
4637
}
4638
4639
#ifdef CONFIG_KVM_SMM
4640
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4641
{
4642
struct vcpu_svm *svm = to_svm(vcpu);
4643
4644
/* Per APM Vol.2 15.22.2 "Response to SMI" */
4645
if (!gif_set(svm))
4646
return true;
4647
4648
return is_smm(vcpu);
4649
}
4650
4651
static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4652
{
4653
struct vcpu_svm *svm = to_svm(vcpu);
4654
if (svm->nested.nested_run_pending)
4655
return -EBUSY;
4656
4657
if (svm_smi_blocked(vcpu))
4658
return 0;
4659
4660
/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
4661
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4662
return -EBUSY;
4663
4664
return 1;
4665
}
4666
4667
static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
4668
{
4669
struct vcpu_svm *svm = to_svm(vcpu);
4670
struct kvm_host_map map_save;
4671
int ret;
4672
4673
if (!is_guest_mode(vcpu))
4674
return 0;
4675
4676
/*
4677
* 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is
4678
* responsible for ensuring nested SVM and SMIs are mutually exclusive.
4679
*/
4680
4681
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4682
return 1;
4683
4684
smram->smram64.svm_guest_flag = 1;
4685
smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
4686
4687
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4688
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4689
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4690
4691
ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4692
if (ret)
4693
return ret;
4694
4695
/*
4696
* KVM uses VMCB01 to store L1 host state while L2 runs but
4697
* VMCB01 is going to be used during SMM and thus the state will
4698
* be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4699
* area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that the
4700
* format of the area is identical to the guest save area offset
4701
* by 0x400 (matches the offset of 'struct vmcb_save_area'
4702
* within 'struct vmcb'). Note: HSAVE area may also be used by
4703
* L1 hypervisor to save additional host context (e.g. KVM does
4704
* that, see svm_prepare_switch_to_guest()) which must be
4705
* preserved.
4706
*/
4707
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4708
return 1;
4709
4710
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4711
4712
svm_copy_vmrun_state(map_save.hva + 0x400,
4713
&svm->vmcb01.ptr->save);
4714
4715
kvm_vcpu_unmap(vcpu, &map_save);
4716
return 0;
4717
}
4718
4719
static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
4720
{
4721
struct vcpu_svm *svm = to_svm(vcpu);
4722
struct kvm_host_map map, map_save;
4723
struct vmcb *vmcb12;
4724
int ret;
4725
4726
const struct kvm_smram_state_64 *smram64 = &smram->smram64;
4727
4728
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
4729
return 0;
4730
4731
/* Non-zero if SMI arrived while vCPU was in guest mode. */
4732
if (!smram64->svm_guest_flag)
4733
return 0;
4734
4735
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SVM))
4736
return 1;
4737
4738
if (!(smram64->efer & EFER_SVME))
4739
return 1;
4740
4741
if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
4742
return 1;
4743
4744
ret = 1;
4745
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4746
goto unmap_map;
4747
4748
if (svm_allocate_nested(svm))
4749
goto unmap_save;
4750
4751
/*
4752
* Restore L1 host state from L1 HSAVE area as VMCB01 was
4753
* used during SMM (see svm_enter_smm())
4754
*/
4755
4756
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
4757
4758
/*
4759
* Enter the nested guest now
4760
*/
4761
4762
vmcb_mark_all_dirty(svm->vmcb01.ptr);
4763
4764
vmcb12 = map.hva;
4765
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
4766
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
4767
ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
4768
4769
if (ret)
4770
goto unmap_save;
4771
4772
svm->nested.nested_run_pending = 1;
4773
4774
unmap_save:
4775
kvm_vcpu_unmap(vcpu, &map_save);
4776
unmap_map:
4777
kvm_vcpu_unmap(vcpu, &map);
4778
return ret;
4779
}
4780
4781
static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
4782
{
4783
struct vcpu_svm *svm = to_svm(vcpu);
4784
4785
if (!gif_set(svm)) {
4786
if (vgif)
4787
svm_set_intercept(svm, INTERCEPT_STGI);
4788
/* STGI will cause a vm exit */
4789
} else {
4790
/* We must be in SMM; RSM will cause a vmexit anyway. */
4791
}
4792
}
4793
#endif
4794
4795
static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
4796
void *insn, int insn_len)
4797
{
4798
struct vcpu_svm *svm = to_svm(vcpu);
4799
bool smep, smap, is_user;
4800
u64 error_code;
4801
4802
/* Check that emulation is possible during event vectoring */
4803
if ((svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK) &&
4804
!kvm_can_emulate_event_vectoring(emul_type))
4805
return X86EMUL_UNHANDLEABLE_VECTORING;
4806
4807
/* Emulation is always possible when KVM has access to all guest state. */
4808
if (!sev_guest(vcpu->kvm))
4809
return X86EMUL_CONTINUE;
4810
4811
/* #UD and #GP should never be intercepted for SEV guests. */
4812
WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
4813
EMULTYPE_TRAP_UD_FORCED |
4814
EMULTYPE_VMWARE_GP));
4815
4816
/*
4817
* Emulation is impossible for SEV-ES guests as KVM doesn't have access
4818
* to guest register state.
4819
*/
4820
if (sev_es_guest(vcpu->kvm))
4821
return X86EMUL_RETRY_INSTR;
4822
4823
/*
4824
* Emulation is possible if the instruction is already decoded, e.g.
4825
* when completing I/O after returning from userspace.
4826
*/
4827
if (emul_type & EMULTYPE_NO_DECODE)
4828
return X86EMUL_CONTINUE;
4829
4830
/*
4831
* Emulation is possible for SEV guests if and only if a prefilled
4832
* buffer containing the bytes of the intercepted instruction is
4833
* available. SEV guest memory is encrypted with a guest specific key
4834
* and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
4835
* decode garbage.
4836
*
4837
* If KVM is NOT trying to simply skip an instruction, inject #UD if
4838
* KVM reached this point without an instruction buffer. In practice,
4839
* this path should never be hit by a well-behaved guest, e.g. KVM
4840
* doesn't intercept #UD or #GP for SEV guests, but this path is still
4841
* theoretically reachable, e.g. via unaccelerated fault-like AVIC
4842
* access, and needs to be handled by KVM to avoid putting the guest
4843
* into an infinite loop. Injecting #UD is somewhat arbitrary, but
4844
* it's the least awful option given the lack of insight into the guest.
4845
*
4846
* If KVM is trying to skip an instruction, simply resume the guest.
4847
* If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
4848
* will attempt to re-inject the INT3/INTO and skip the instruction.
4849
* In that scenario, retrying the INT3/INTO and hoping the guest will
4850
* make forward progress is the only option that has a chance of
4851
* success (and in practice it will work the vast majority of the time).
4852
*/
4853
if (unlikely(!insn)) {
4854
if (emul_type & EMULTYPE_SKIP)
4855
return X86EMUL_UNHANDLEABLE;
4856
4857
kvm_queue_exception(vcpu, UD_VECTOR);
4858
return X86EMUL_PROPAGATE_FAULT;
4859
}
4860
4861
/*
4862
* Emulate for SEV guests if the insn buffer is not empty. The buffer
4863
* will be empty if the DecodeAssist microcode cannot fetch bytes for
4864
* the faulting instruction because the code fetch itself faulted, e.g.
4865
* the guest attempted to fetch from emulated MMIO or a guest page
4866
* table used to translate CS:RIP resides in emulated MMIO.
4867
*/
4868
if (likely(insn_len))
4869
return X86EMUL_CONTINUE;
4870
4871
/*
4872
* Detect and workaround Errata 1096 Fam_17h_00_0Fh.
4873
*
4874
* Errata:
4875
* When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
4876
* possible that CPU microcode implementing DecodeAssist will fail to
4877
* read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
4878
* be '0'. This happens because microcode reads CS:RIP using a _data_
4879
* load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
4880
* gives up and does not fill the instruction bytes buffer.
4881
*
4882
* As above, KVM reaches this point iff the VM is an SEV guest, the CPU
4883
* supports DecodeAssist, a #NPF was raised, KVM's page fault handler
4884
* triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
4885
* GuestIntrBytes field of the VMCB.
4886
*
4887
* This does _not_ mean that the erratum has been encountered, as the
4888
* DecodeAssist will also fail if the load for CS:RIP hits a legitimate
4889
* #PF, e.g. if the guest attempts to execute from emulated MMIO and
4890
* encountered a reserved/not-present #PF.
4891
*
4892
* To hit the erratum, the following conditions must be true:
4893
* 1. CR4.SMAP=1 (obviously).
4894
* 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot
4895
* have been hit as the guest would have encountered a SMEP
4896
* violation #PF, not a #NPF.
4897
* 3. The #NPF is not due to a code fetch, in which case failure to
4898
* retrieve the instruction bytes is legitimate (see above).
4899
*
4900
* In addition, don't apply the erratum workaround if the #NPF occurred
4901
* while translating guest page tables (see below).
4902
*/
4903
error_code = svm->vmcb->control.exit_info_1;
4904
if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
4905
goto resume_guest;
4906
4907
smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
4908
smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
4909
is_user = svm_get_cpl(vcpu) == 3;
4910
if (smap && (!smep || is_user)) {
4911
pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
4912
4913
/*
4914
* If the fault occurred in userspace, arbitrarily inject #GP
4915
* to avoid killing the guest and to hopefully avoid confusing
4916
* the guest kernel too much, e.g. injecting #PF would not be
4917
* coherent with respect to the guest's page tables. Request
4918
* triple fault if the fault occurred in the kernel as there's
4919
* no fault that KVM can inject without confusing the guest.
4920
* In practice, the triple fault is moot as no sane SEV kernel
4921
* will execute from user memory while also running with SMAP=1.
4922
*/
4923
if (is_user)
4924
kvm_inject_gp(vcpu, 0);
4925
else
4926
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4927
return X86EMUL_PROPAGATE_FAULT;
4928
}
4929
4930
resume_guest:
4931
/*
4932
* If the erratum was not hit, simply resume the guest and let it fault
4933
* again. While awful, e.g. the vCPU may get stuck in an infinite loop
4934
* if the fault is at CPL=0, it's the lesser of all evils. Exiting to
4935
* userspace will kill the guest, and letting the emulator read garbage
4936
* will yield random behavior and potentially corrupt the guest.
4937
*
4938
* Simply resuming the guest is technically not a violation of the SEV
4939
* architecture. AMD's APM states that all code fetches and page table
4940
* accesses for SEV guest are encrypted, regardless of the C-Bit. The
4941
* APM also states that encrypted accesses to MMIO are "ignored", but
4942
* doesn't explicitly define "ignored", i.e. doing nothing and letting
4943
* the guest spin is technically "ignoring" the access.
4944
*/
4945
return X86EMUL_RETRY_INSTR;
4946
}
4947
4948
static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4949
{
4950
struct vcpu_svm *svm = to_svm(vcpu);
4951
4952
return !gif_set(svm);
4953
}
4954
4955
static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4956
{
4957
if (!sev_es_guest(vcpu->kvm))
4958
return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
4959
4960
sev_vcpu_deliver_sipi_vector(vcpu, vector);
4961
}
4962
4963
static void svm_vm_destroy(struct kvm *kvm)
4964
{
4965
avic_vm_destroy(kvm);
4966
sev_vm_destroy(kvm);
4967
4968
svm_srso_vm_destroy();
4969
}
4970
4971
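/*
 * Per-VM initialization: flag protected-state/private-memory VM types
 * (SEV-ES, SNP), disable PAUSE exiting if pause filtering is unusable, and
 * set up AVIC when APICv is enabled.
 */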
static int svm_vm_init(struct kvm *kvm)
4972
{
4973
int type = kvm->arch.vm_type;
4974
4975
if (type != KVM_X86_DEFAULT_VM &&
4976
type != KVM_X86_SW_PROTECTED_VM) {
4977
kvm->arch.has_protected_state =
4978
(type == KVM_X86_SEV_ES_VM || type == KVM_X86_SNP_VM);
4979
to_kvm_sev_info(kvm)->need_init = true;
4980
4981
kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
4982
kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
4983
}
4984
4985
if (!pause_filter_count || !pause_filter_thresh)
4986
kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
4987
4988
if (enable_apicv) {
4989
int ret = avic_vm_init(kvm);
4990
if (ret)
4991
return ret;
4992
}
4993
4994
svm_srso_vm_init();
4995
return 0;
4996
}
4997
4998
static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
4999
{
5000
struct page *page = snp_safe_alloc_page();
5001
5002
if (!page)
5003
return NULL;
5004
5005
return page_address(page);
5006
}
5007
5008
struct kvm_x86_ops svm_x86_ops __initdata = {
5009
.name = KBUILD_MODNAME,
5010
5011
.check_processor_compatibility = svm_check_processor_compat,
5012
5013
.hardware_unsetup = svm_hardware_unsetup,
5014
.enable_virtualization_cpu = svm_enable_virtualization_cpu,
5015
.disable_virtualization_cpu = svm_disable_virtualization_cpu,
5016
.emergency_disable_virtualization_cpu = svm_emergency_disable_virtualization_cpu,
5017
.has_emulated_msr = svm_has_emulated_msr,
5018
5019
.vcpu_create = svm_vcpu_create,
5020
.vcpu_free = svm_vcpu_free,
5021
.vcpu_reset = svm_vcpu_reset,
5022
5023
.vm_size = sizeof(struct kvm_svm),
5024
.vm_init = svm_vm_init,
5025
.vm_destroy = svm_vm_destroy,
5026
5027
.prepare_switch_to_guest = svm_prepare_switch_to_guest,
5028
.vcpu_load = svm_vcpu_load,
5029
.vcpu_put = svm_vcpu_put,
5030
.vcpu_blocking = avic_vcpu_blocking,
5031
.vcpu_unblocking = avic_vcpu_unblocking,
5032
5033
.update_exception_bitmap = svm_update_exception_bitmap,
5034
.get_feature_msr = svm_get_feature_msr,
5035
.get_msr = svm_get_msr,
5036
.set_msr = svm_set_msr,
5037
.get_segment_base = svm_get_segment_base,
5038
.get_segment = svm_get_segment,
5039
.set_segment = svm_set_segment,
5040
.get_cpl = svm_get_cpl,
5041
.get_cpl_no_cache = svm_get_cpl,
5042
.get_cs_db_l_bits = svm_get_cs_db_l_bits,
5043
.is_valid_cr0 = svm_is_valid_cr0,
5044
.set_cr0 = svm_set_cr0,
5045
.post_set_cr3 = sev_post_set_cr3,
5046
.is_valid_cr4 = svm_is_valid_cr4,
5047
.set_cr4 = svm_set_cr4,
5048
.set_efer = svm_set_efer,
5049
.get_idt = svm_get_idt,
5050
.set_idt = svm_set_idt,
5051
.get_gdt = svm_get_gdt,
5052
.set_gdt = svm_set_gdt,
5053
.set_dr7 = svm_set_dr7,
5054
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
5055
.cache_reg = svm_cache_reg,
5056
.get_rflags = svm_get_rflags,
5057
.set_rflags = svm_set_rflags,
5058
.get_if_flag = svm_get_if_flag,
5059
5060
.flush_tlb_all = svm_flush_tlb_all,
5061
.flush_tlb_current = svm_flush_tlb_current,
5062
.flush_tlb_gva = svm_flush_tlb_gva,
5063
.flush_tlb_guest = svm_flush_tlb_asid,
5064
5065
.vcpu_pre_run = svm_vcpu_pre_run,
5066
.vcpu_run = svm_vcpu_run,
5067
.handle_exit = svm_handle_exit,
5068
.skip_emulated_instruction = svm_skip_emulated_instruction,
5069
.update_emulated_instruction = NULL,
5070
.set_interrupt_shadow = svm_set_interrupt_shadow,
5071
.get_interrupt_shadow = svm_get_interrupt_shadow,
5072
.patch_hypercall = svm_patch_hypercall,
5073
.inject_irq = svm_inject_irq,
5074
.inject_nmi = svm_inject_nmi,
5075
.is_vnmi_pending = svm_is_vnmi_pending,
5076
.set_vnmi_pending = svm_set_vnmi_pending,
5077
.inject_exception = svm_inject_exception,
5078
.cancel_injection = svm_cancel_injection,
5079
.interrupt_allowed = svm_interrupt_allowed,
5080
.nmi_allowed = svm_nmi_allowed,
5081
.get_nmi_mask = svm_get_nmi_mask,
5082
.set_nmi_mask = svm_set_nmi_mask,
5083
.enable_nmi_window = svm_enable_nmi_window,
5084
.enable_irq_window = svm_enable_irq_window,
5085
.update_cr8_intercept = svm_update_cr8_intercept,
5086
5087
.x2apic_icr_is_split = true,
5088
.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
5089
.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
5090
.apicv_post_state_restore = avic_apicv_post_state_restore,
5091
.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
5092
5093
.get_exit_info = svm_get_exit_info,
5094
.get_entry_info = svm_get_entry_info,
5095
5096
.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
5097
5098
.has_wbinvd_exit = svm_has_wbinvd_exit,
5099
5100
.get_l2_tsc_offset = svm_get_l2_tsc_offset,
5101
.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
5102
.write_tsc_offset = svm_write_tsc_offset,
5103
.write_tsc_multiplier = svm_write_tsc_multiplier,
5104
5105
.load_mmu_pgd = svm_load_mmu_pgd,
5106
5107
.check_intercept = svm_check_intercept,
5108
.handle_exit_irqoff = svm_handle_exit_irqoff,
5109
5110
.nested_ops = &svm_nested_ops,
5111
5112
.deliver_interrupt = svm_deliver_interrupt,
5113
.pi_update_irte = avic_pi_update_irte,
5114
.setup_mce = svm_setup_mce,
5115
5116
#ifdef CONFIG_KVM_SMM
5117
.smi_allowed = svm_smi_allowed,
5118
.enter_smm = svm_enter_smm,
5119
.leave_smm = svm_leave_smm,
5120
.enable_smi_window = svm_enable_smi_window,
5121
#endif
5122
5123
#ifdef CONFIG_KVM_AMD_SEV
5124
.dev_get_attr = sev_dev_get_attr,
5125
.mem_enc_ioctl = sev_mem_enc_ioctl,
5126
.mem_enc_register_region = sev_mem_enc_register_region,
5127
.mem_enc_unregister_region = sev_mem_enc_unregister_region,
5128
.guest_memory_reclaimed = sev_guest_memory_reclaimed,
5129
5130
.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
5131
.vm_move_enc_context_from = sev_vm_move_enc_context_from,
5132
#endif
5133
.check_emulate_instruction = svm_check_emulate_instruction,
5134
5135
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
5136
5137
.recalc_intercepts = svm_recalc_intercepts,
5138
.complete_emulated_msr = svm_complete_emulated_msr,
5139
5140
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
5141
.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
5142
.alloc_apic_backing_page = svm_alloc_apic_backing_page,
5143
5144
.gmem_prepare = sev_gmem_prepare,
5145
.gmem_invalidate = sev_gmem_invalidate,
5146
.gmem_max_mapping_level = sev_gmem_max_mapping_level,
5147
};
5148
5149
/*
5150
* The default MMIO mask is a single bit (excluding the present bit),
5151
* which could conflict with the memory encryption bit. Check for
5152
* memory encryption support and override the default MMIO mask if
5153
* memory encryption is enabled.
5154
*/
5155
static __init void svm_adjust_mmio_mask(void)
5156
{
5157
unsigned int enc_bit, mask_bit;
5158
u64 msr, mask;
5159
5160
/* If there is no memory encryption support, use existing mask */
5161
if (cpuid_eax(0x80000000) < 0x8000001f)
5162
return;
5163
5164
/* If memory encryption is not enabled, use existing mask */
5165
rdmsrq(MSR_AMD64_SYSCFG, msr);
5166
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
5167
return;
5168
5169
enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
5170
mask_bit = boot_cpu_data.x86_phys_bits;
5171
5172
/* Increment the mask bit if it is the same as the encryption bit */
5173
if (enc_bit == mask_bit)
5174
mask_bit++;
5175
5176
/*
5177
* If the mask bit location is below 52, then some bits above the
5178
* physical addressing limit will always be reserved, so use the
5179
* rsvd_bits() function to generate the mask. This mask, along with
5180
* the present bit, will be used to generate a page fault with
5181
* PFER.RSV = 1.
5182
*
5183
* If the mask bit location is 52 (or above), then clear the mask.
5184
*/
5185
mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
5186
5187
kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
5188
}
5189
5190
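/*
 * Adjust KVM's view of supported CPU features for SVM: advertise the nested
 * SVM feature set when "nested" is enabled, and cap or clear features (PMU,
 * SEV, bus lock detect, etc.) based on hardware and module parameters.
 */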
static __init void svm_set_cpu_caps(void)
5191
{
5192
kvm_set_cpu_caps();
5193
5194
kvm_caps.supported_perf_cap = 0;
5195
5196
kvm_cpu_cap_clear(X86_FEATURE_IBT);
5197
5198
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
5199
if (nested) {
5200
kvm_cpu_cap_set(X86_FEATURE_SVM);
5201
kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
5202
5203
/*
5204
* KVM currently flushes TLBs on *every* nested SVM transition,
5205
* and so for all intents and purposes KVM supports flushing by
5206
* ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
5207
*/
5208
kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
5209
5210
if (nrips)
5211
kvm_cpu_cap_set(X86_FEATURE_NRIPS);
5212
5213
if (npt_enabled)
5214
kvm_cpu_cap_set(X86_FEATURE_NPT);
5215
5216
if (tsc_scaling)
5217
kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
5218
5219
if (vls)
5220
kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
5221
if (lbrv)
5222
kvm_cpu_cap_set(X86_FEATURE_LBRV);
5223
5224
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
5225
kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
5226
5227
if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
5228
kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
5229
5230
if (vgif)
5231
kvm_cpu_cap_set(X86_FEATURE_VGIF);
5232
5233
if (vnmi)
5234
kvm_cpu_cap_set(X86_FEATURE_VNMI);
5235
5236
/* Nested VM can receive #VMEXIT instead of triggering #GP */
5237
kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
5238
}
5239
5240
if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
5241
kvm_caps.has_bus_lock_exit = true;
5242
5243
/* CPUID 0x80000008 */
5244
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5245
boot_cpu_has(X86_FEATURE_AMD_SSBD))
5246
kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
5247
5248
if (enable_pmu) {
5249
/*
5250
* Enumerate support for PERFCTR_CORE if and only if KVM has
5251
* access to enough counters to virtualize "core" support,
5252
* otherwise limit vPMU support to the legacy number of counters.
5253
*/
5254
if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
5255
kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
5256
kvm_pmu_cap.num_counters_gp);
5257
else
5258
kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
5259
5260
if (kvm_pmu_cap.version != 2 ||
5261
!kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
5262
kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
5263
}
5264
5265
/* CPUID 0x8000001F (SME/SEV features) */
5266
sev_set_cpu_caps();
5267
5268
/*
5269
* Clear capabilities that are automatically configured by common code,
5270
* but that require explicit SVM support (that isn't yet implemented).
5271
*/
5272
kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
5273
kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
5274
}
5275
5276
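/*
 * One-time hardware/module setup: validate required CPU features, configure
 * pause filtering, TSC scaling, NPT and the MMIO mask, allocate the I/O
 * permissions map, and initialize per-CPU, SEV and AVIC state.  Module
 * parameters are downgraded here when the hardware support is missing.
 */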
static __init int svm_hardware_setup(void)
5277
{
5278
void *iopm_va;
5279
int cpu, r;
5280
5281
/*
5282
* NX is required for shadow paging and for NPT if the NX huge pages
5283
* mitigation is enabled.
5284
*/
5285
if (!boot_cpu_has(X86_FEATURE_NX)) {
5286
pr_err_ratelimited("NX (Execute Disable) not supported\n");
5287
return -EOPNOTSUPP;
5288
}
5289
kvm_enable_efer_bits(EFER_NX);
5290
5291
kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
5292
XFEATURE_MASK_BNDCSR);
5293
5294
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
5295
kvm_enable_efer_bits(EFER_FFXSR);
5296
5297
if (tsc_scaling) {
5298
if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
5299
tsc_scaling = false;
5300
} else {
5301
pr_info("TSC scaling supported\n");
5302
kvm_caps.has_tsc_control = true;
5303
}
5304
}
5305
kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
5306
kvm_caps.tsc_scaling_ratio_frac_bits = 32;
5307
5308
tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
5309
5310
if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
5311
kvm_enable_efer_bits(EFER_AUTOIBRS);
5312
5313
/* Check for pause filtering support */
5314
if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
5315
pause_filter_count = 0;
5316
pause_filter_thresh = 0;
5317
} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5318
pause_filter_thresh = 0;
5319
}
5320
5321
if (nested) {
5322
pr_info("Nested Virtualization enabled\n");
5323
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
5324
5325
r = nested_svm_init_msrpm_merge_offsets();
5326
if (r)
5327
return r;
5328
}
5329
5330
/*
5331
* KVM's MMU doesn't support using 2-level paging for itself, and thus
5332
* NPT isn't supported if the host is using 2-level paging since host
5333
* CR4 is unchanged on VMRUN.
5334
*/
5335
if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5336
npt_enabled = false;
5337
5338
if (!boot_cpu_has(X86_FEATURE_NPT))
5339
npt_enabled = false;
5340
5341
/* Force VM NPT level equal to the host's paging level */
5342
kvm_configure_mmu(npt_enabled, get_npt_level(),
5343
get_npt_level(), PG_LEVEL_1G);
5344
pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
5345
5346
/*
5347
* It seems that on AMD processors the PTE's accessed bit is
5348
* being set by the CPU hardware before the NPF vmexit.
5349
* This is not expected behaviour and our tests fail because
5350
* of it.
5351
* A workaround here is to disable support for
5352
* GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
5353
* In this case, userspace can use the KVM_CAP_SMALLER_MAXPHYADDR
5354
* extension to detect whether the support is present and decide how
5355
* to handle it.
5356
* If future AMD CPU models change the behaviour described above,
5357
* this variable can be changed accordingly.
5358
*/
5359
allow_smaller_maxphyaddr = !npt_enabled;
5360
5361
/* Setup shadow_me_value and shadow_me_mask */
5362
kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5363
5364
svm_adjust_mmio_mask();
5365
5366
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
5367
5368
if (lbrv) {
5369
if (!boot_cpu_has(X86_FEATURE_LBRV))
5370
lbrv = false;
5371
else
5372
pr_info("LBR virtualization supported\n");
5373
}
5374
5375
iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);
5376
if (!iopm_va)
5377
return -ENOMEM;
5378
5379
iopm_base = __sme_set(__pa(iopm_va));
5380
5381
/*
5382
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5383
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
5384
*/
5385
sev_hardware_setup();
5386
5387
svm_hv_hardware_setup();
5388
5389
for_each_possible_cpu(cpu) {
5390
r = svm_cpu_init(cpu);
5391
if (r)
5392
goto err;
5393
}
5394
5395
enable_apicv = avic_hardware_setup();
5396
if (!enable_apicv) {
5397
enable_ipiv = false;
5398
svm_x86_ops.vcpu_blocking = NULL;
5399
svm_x86_ops.vcpu_unblocking = NULL;
5400
svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5401
}
5402
5403
if (vls) {
5404
if (!npt_enabled ||
5405
!boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5406
!IS_ENABLED(CONFIG_X86_64)) {
5407
vls = false;
5408
} else {
5409
pr_info("Virtual VMLOAD VMSAVE supported\n");
5410
}
5411
}
5412
5413
if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5414
svm_gp_erratum_intercept = false;
5415
5416
if (vgif) {
5417
if (!boot_cpu_has(X86_FEATURE_VGIF))
5418
vgif = false;
5419
else
5420
pr_info("Virtual GIF supported\n");
5421
}
5422
5423
vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
5424
if (vnmi)
5425
pr_info("Virtual NMI enabled\n");
5426
5427
if (!vnmi) {
5428
svm_x86_ops.is_vnmi_pending = NULL;
5429
svm_x86_ops.set_vnmi_pending = NULL;
5430
}
5431
5432
if (!enable_pmu)
5433
pr_info("PMU virtualization is disabled\n");
5434
5435
svm_set_cpu_caps();
5436
5437
kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
5438
return 0;
5439
5440
err:
5441
svm_hardware_unsetup();
5442
return r;
5443
}
5444
5445
5446
static struct kvm_x86_init_ops svm_init_ops __initdata = {
5447
.hardware_setup = svm_hardware_setup,
5448
5449
.runtime_ops = &svm_x86_ops,
5450
.pmu_ops = &amd_pmu_ops,
5451
};
5452
5453
static void __svm_exit(void)
5454
{
5455
kvm_x86_vendor_exit();
5456
}
5457
5458
static int __init svm_init(void)
5459
{
5460
int r;
5461
5462
KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm);
5463
5464
__unused_size_checks();
5465
5466
if (!kvm_is_svm_supported())
5467
return -EOPNOTSUPP;
5468
5469
r = kvm_x86_vendor_init(&svm_init_ops);
5470
if (r)
5471
return r;
5472
5473
/*
5474
* Common KVM initialization _must_ come last, after this, /dev/kvm is
5475
* exposed to userspace!
5476
*/
5477
r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
5478
THIS_MODULE);
5479
if (r)
5480
goto err_kvm_init;
5481
5482
return 0;
5483
5484
err_kvm_init:
5485
__svm_exit();
5486
return r;
5487
}
5488
5489
static void __exit svm_exit(void)
5490
{
5491
kvm_exit();
5492
__svm_exit();
5493
}
5494
5495
module_init(svm_init)
5496
module_exit(svm_exit)
5497
5498