// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <[email protected]>
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Yaniv Kamay  <[email protected]>
 *   Amit Shah    <[email protected]>
 *   Ben-Ami Yassour <[email protected]>
 *   Andrey Smetanin <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"
#include "fpu.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)

/*
 * As per the Hyper-V TLFS, extended hypercalls start from 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit indicates which extended hypercall is available
 * besides HvExtCallQueryCapabilities.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

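/*
 * Track which vectors are in use by this SynIC and whether any of them use
 * AutoEOI, toggling the partition-wide Hyper-V APICv inhibit as needed.
 */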
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	bool auto_eoi_old, auto_eoi_new;

	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (auto_eoi_old == auto_eoi_new)
		return;

	if (!enable_apicv)
		return;

	down_write(&vcpu->kvm->arch.apicv_update_lock);

	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;

	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * default '0x10000' value on boot and this should not #GP. We need to
	 * allow zero-initing the register from host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}

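/*
 * Find the vCPU with the given VP index.  The common case of
 * vp_index == vcpu_idx is tried first before falling back to a full scan.
 */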
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

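/*
 * A SINT was acknowledged: retry delivery of pending synthetic timer
 * messages targeting it and notify the GSI (if any) routed to this SINT.
 */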
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

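/* Report a guest-initiated SynIC MSR write to userspace via KVM_EXIT_HYPERV. */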
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

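/*
 * Handle writes to the per-vCPU SynIC MSRs: SCONTROL, SVERSION, SIEFP,
 * SIMP, EOM and SINT0..SINT15.
 */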
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	if (!synic->active && (!host || data))
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		if (!synic->active)
			break;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
		kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

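/* Deliver the interrupt configured for 'sint' to the vCPU's local APIC. */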
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{
	struct kvm_vcpu_hv_synic *synic;

	if (!level)
		return -1;

	synic = synic_get(kvm, e->hv_sint.vcpu);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, e->hv_sint.sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

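/* Rebuild the SINT -> GSI mapping from the current IRQ routing table. */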
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

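/* Return the partition reference time, in 100ns units. */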
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
	 * is broken, disabled or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4(15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

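/* Handle a write to a synthetic timer's configuration MSR. */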
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || config))
		return 1;

	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
		     !(hv_vcpu->cpuid_cache.features_edx &
		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
		return 1;

	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || count))
		return 1;

	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (!host) {
		if (stimer->count == 0)
			stimer->config.enable = 0;
		else if (stimer->config.auto_enable)
			stimer->config.enable = 1;
	}

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

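/*
 * Post a message into the SynIC message page slot for 'sint' and raise the
 * corresponding interrupt.  Returns -EAGAIN if the slot is still busy and
 * delivery should be retried later.
 */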
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type. However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * guest POV and thus the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	if (r == 0)
		return -EFAULT;
	return 0;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message
	 * delivery for them (within "lazy" lost ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}

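/* Process synthetic timers that were marked pending for this vCPU. */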
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);

	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_assist_page_enabled);

int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
		return -EFAULT;

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				     &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_setup(&stimer->timer, stimer_timer_callback, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer_prepare_msg(stimer);
}

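/* Allocate and initialize the per-vCPU Hyper-V context on first use. */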
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (hv_vcpu)
		return 0;

	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;

	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);

	hv_vcpu->vp_index = vcpu->vcpu_idx;

	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
	}

	return 0;
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;

	r = kvm_hv_vcpu_init(vcpu);
	if (r)
		return r;

	synic = to_hv_synic(vcpu);

	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}

/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation and Hyper-V is known to expect the values in TSC page to
 * stay constant before TSC access emulation is disabled from guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
 * frequency and guest visible TSC value across migration (and prevent it when
 * TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}

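/*
 * Compute and publish the Hyper-V reference TSC page, using the sequence
 * counter to keep guest readers away while the contents are updated.
 */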
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	guard(mutex)(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		return;

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;

	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;

		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		return;
	}

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	return;

out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
}

void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_lock(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
	    !tsc_page_update_unsafe(hv))
		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;

	mutex_unlock(&hv->hv_lock);
}

static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_RUNTIME:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_RUNTIME_AVAILABLE;
	case HV_X64_MSR_TIME_REF_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_TIME_REF_COUNT_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	case HV_X64_MSR_RESET:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_RESET_AVAILABLE;
	case HV_X64_MSR_REFERENCE_TSC:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_REFERENCE_TSC_AVAILABLE;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNIC_AVAILABLE;
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG:
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNTIMER_AVAILABLE;
	case HV_X64_MSR_EOI:
	case HV_X64_MSR_ICR:
	case HV_X64_MSR_TPR:
	case HV_X64_MSR_VP_ASSIST_PAGE:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_APIC_ACCESS_AVAILABLE;
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_FREQUENCY_MSRS;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_REENLIGHTENMENT;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_TSC_INVARIANT;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
	default:
		break;
	}

	return false;
}

#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */

/*
 * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in !XSAVES && XSAVEC
 * configuration.
 * Such configuration can result from, for example, AMD Erratum 1386 workaround.
 *
 * Print a notice so users aren't left wondering what's suddenly gone wrong.
 */
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	/* Check again under the hv_lock. */
	if (hv->xsaves_xsavec_checked)
		return;

	if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
	    KVM_HV_WIN2016_GUEST_ID)
		return;

	hv->xsaves_xsavec_checked = true;

	/* UP configurations aren't affected */
	if (atomic_read(&kvm->online_vcpus) < 2)
		return;

	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
	    !guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVEC))
		return;

	pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
			      "If it fails to boot try disabling XSAVEC in the VM config.\n");
}

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!vcpu->arch.hyperv_enabled ||
	    hv->xsaves_xsavec_checked)
		return;

	mutex_lock(&hv->hv_lock);
	__kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
	mutex_unlock(&hv->hv_lock);
}

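/* Handle writes to partition-wide MSRs; called with hv->hv_lock held. */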
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting the bit 31 of EAX
		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just
		 * going to be clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}

		/* vmcall/vmmcall */
		kvm_x86_call(patch_hypercall)(vcpu, instructions + i);
		i += 3;

		/* ret */
		((unsigned char *)instructions)[i++] = 0xc3;

		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);

		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);

			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;

		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		/* Only bit 0 is supported */
		if (data & ~HV_EXPOSE_INVARIANT_TSC)
			return 1;

		/* The feature can't be disabled from the guest */
		if (!host && hv->hv_invtsc_control && !data)
			return 1;

		hv->hv_invtsc_control = data;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

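/* Handle writes to the per-vCPU Hyper-V MSRs. */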
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_index by
		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu->vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear apic_assist portion of struct hv_vp_assist_page
		 * only, there can be valuable data in the rest which needs
		 * to be preserved e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_set_pv_eoi(vcpu,
					 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					 sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(to_hv_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(to_hv_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		data = hv->hv_invtsc_control;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_get_msr(vcpu, msr, pdata, host);
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = div64_u64(1000000000ULL,
				 vcpu->kvm->arch.apic_bus_cycle_ns);
		break;
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (kvm_hv_vcpu_init(vcpu))
		return 1;

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (kvm_hv_vcpu_init(vcpu))
		return 1;

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

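/* Convert a Hyper-V sparse VP set into a bitmap of vCPU indices. */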
static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
1795
u64 valid_bank_mask, unsigned long *vcpu_mask)
1796
{
1797
struct kvm_hv *hv = to_kvm_hv(kvm);
1798
bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
1799
u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1800
struct kvm_vcpu *vcpu;
1801
int bank, sbank = 0;
1802
unsigned long i;
1803
u64 *bitmap;
1804
1805
BUILD_BUG_ON(sizeof(vp_bitmap) >
1806
sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
1807
1808
/*
1809
* If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
1810
* fill a temporary buffer and manually test each vCPU's VP index.
1811
*/
1812
if (likely(!has_mismatch))
1813
bitmap = (u64 *)vcpu_mask;
1814
else
1815
bitmap = vp_bitmap;
1816
1817
/*
1818
* Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
1819
* having a '1' for each bank that exists in sparse_banks. Sets must
1820
* be in ascending order, i.e. bank0..bankN.
1821
*/
1822
memset(bitmap, 0, sizeof(vp_bitmap));
1823
for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1824
KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1825
bitmap[bank] = sparse_banks[sbank++];
1826
1827
if (likely(!has_mismatch))
1828
return;
1829
1830
bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1831
kvm_for_each_vcpu(i, vcpu, kvm) {
1832
if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1833
__set_bit(i, vcpu_mask);
1834
}
1835
}
1836
1837
static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
1838
{
1839
int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
1840
unsigned long sbank;
1841
1842
if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
1843
return false;
1844
1845
/*
1846
* The index into the sparse bank is the number of preceding bits in
1847
* the valid mask. Optimize for VMs with <64 vCPUs by skipping the
1848
* fancy math if there can't possibly be preceding bits.
1849
*/
1850
if (valid_bit_nr)
1851
sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
1852
else
1853
sbank = 0;
1854
1855
return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
1856
(unsigned long *)&sparse_banks[sbank]);
1857
}
1858
1859
struct kvm_hv_hcall {
1860
/* Hypercall input data */
1861
u64 param;
1862
u64 ingpa;
1863
u64 outgpa;
1864
u16 code;
1865
u16 var_cnt;
1866
u16 rep_cnt;
1867
u16 rep_idx;
1868
bool fast;
1869
bool rep;
1870
sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1871
1872
/*
1873
* Current read offset when KVM reads hypercall input data gradually,
1874
* either offset in bytes from 'ingpa' for regular hypercalls or the
1875
* number of already consumed 'XMM halves' for 'fast' hypercalls.
1876
*/
1877
union {
1878
gpa_t data_offset;
1879
int consumed_xmm_halves;
1880
};
1881
};
1882
1883
1884
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
1885
u16 orig_cnt, u16 cnt_cap, u64 *data)
1886
{
1887
/*
1888
* Preserve the original count when ignoring entries via a "cap", KVM
1889
* still needs to validate the guest input (though the non-XMM path
1890
* punts on the checks).
1891
*/
1892
u16 cnt = min(orig_cnt, cnt_cap);
1893
int i, j;
1894
1895
if (hc->fast) {
1896
/*
1897
* Each XMM holds two sparse banks, but do not count halves that
1898
* have already been consumed for hypercall parameters.
1899
*/
1900
if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
1901
return HV_STATUS_INVALID_HYPERCALL_INPUT;
1902
1903
for (i = 0; i < cnt; i++) {
1904
j = i + hc->consumed_xmm_halves;
1905
if (j % 2)
1906
data[i] = sse128_hi(hc->xmm[j / 2]);
1907
else
1908
data[i] = sse128_lo(hc->xmm[j / 2]);
1909
}
1910
return 0;
1911
}
1912
1913
return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
1914
cnt * sizeof(*data));
1915
}
1916
1917
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
1918
u64 *sparse_banks)
1919
{
1920
if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
1921
return -EINVAL;
1922
1923
/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
1924
return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
1925
sparse_banks);
1926
}
1927
1928
static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
1929
{
1930
return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
1931
}
1932
1933
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
				 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
				 u64 *entries, int count)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;

	if (!hv_vcpu)
		return;

	spin_lock(&tlb_flush_fifo->write_lock);

	/*
	 * All entries should fit on the fifo leaving one free for 'flush all'
	 * entry in case another request comes in. In case there's not enough
	 * space, just put 'flush all' entry there.
	 */
	if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
		WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
		goto out_unlock;
	}

	/*
	 * Note: full fifo always contains 'flush all' entry, no need to check the
	 * return value.
	 */
	kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);

out_unlock:
	spin_unlock(&tlb_flush_fifo->write_lock);
}

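/*
 * Note (illustrative): the fifo deliberately keeps one slot in reserve, i.e.
 * at most KVM_HV_TLB_FLUSH_FIFO_SIZE - 1 precise entries are ever queued; a
 * request that does not fit (or a racing second request) degenerates into the
 * single KVM_HV_TLB_FLUSHALL_ENTRY marker instead.
 */
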
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
	int i, j, count;
	gva_t gva;

	if (!tdp_enabled || !hv_vcpu)
		return -EINVAL;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);

	for (i = 0; i < count; i++) {
		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
			goto out_flush_all;

		if (is_noncanonical_invlpg_address(entries[i], vcpu))
			continue;

		/*
		 * Lower 12 bits of 'address' encode the number of additional
		 * pages to flush.
		 */
		gva = entries[i] & PAGE_MASK;
		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
			kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);

		++vcpu->stat.tlb_flush;
	}
	return 0;

out_flush_all:
	kfifo_reset_out(&tlb_flush_fifo->entries);

	/* Fall back to full flush. */
	return -ENOSPC;
}

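/*
 * Illustrative example of the entry encoding handled above: bits 63:12 hold
 * the page-aligned GVA and bits 11:0 the number of *additional* pages.  An
 * entry of 0x7f8a42003 therefore flushes 4 pages starting at GVA 0x7f8a42000
 * (0x7f8a42000, 0x7f8a43000, 0x7f8a44000, 0x7f8a45000).
 */
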
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	unsigned long *vcpu_mask = hv_vcpu->vcpu_mask;
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	/*
	 * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
	 * entries on the TLB flush fifo. The last entry, however, needs to be
	 * always left free for 'flush all' entry which gets placed when
	 * there is not enough space to put all the requested entries.
	 */
	u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
	u64 *tlb_flush_entries;
	u64 valid_bank_mask;
	struct kvm_vcpu *v;
	unsigned long i;
	bool all_cpus;

	/*
	 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
	 * sparse banks. Fail the build if KVM's max allowed number of
	 * vCPUs (>4096) exceeds this limit.
	 */
	BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);

	/*
	 * 'Slow' hypercall's first parameter is the address in guest's memory
	 * where hypercall parameters are placed. This is either a GPA or a
	 * nested GPA when KVM is handling the call from L2 ('direct' TLB
	 * flush). Translate the address here so the memory can be uniformly
	 * read with kvm_read_guest().
	 */
	if (!hc->fast && is_guest_mode(vcpu)) {
		hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
		if (unlikely(hc->ingpa == INVALID_GPA))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
		if (hc->fast) {
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
			hc->consumed_xmm_halves = 1;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush);
		}

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags,
				       is_guest_mode(vcpu));

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;

		/*
		 * Work around possible WS2012 bug: it sends hypercalls
		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't. Let's treat processor_mask == 0 same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		if (hc->fast) {
			flush_ex.address_space = hc->ingpa;
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
			hc->consumed_xmm_halves = 2;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush_ex);
		}

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags, is_guest_mode(vcpu));

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (!all_cpus) {
			if (!hc->var_cnt)
				goto ret_success;

			if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}

		/*
		 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
		 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
		 * case (HV_GENERIC_SET_ALL). Always adjust data_offset and
		 * consumed_xmm_halves to make sure TLB flush entries are read
		 * from the correct offset.
		 */
		if (hc->fast)
			hc->consumed_xmm_halves += hc->var_cnt;
		else
			hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	    hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
		tlb_flush_entries = NULL;
	} else {
		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlb_flush_entries = __tlb_flush_entries;
	}

	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
	if (all_cpus && !is_guest_mode(vcpu)) {
		kvm_for_each_vcpu(i, v, kvm) {
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
	} else if (!is_guest_mode(vcpu)) {
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

		for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
			v = kvm_get_vcpu(kvm, i);
			if (!v)
				continue;
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	} else {
		struct kvm_vcpu_hv *hv_v;

		bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);

		kvm_for_each_vcpu(i, v, kvm) {
			hv_v = to_hv_vcpu(v);

			/*
			 * The following check races with nested vCPUs entering/exiting
			 * and/or migrating between L1's vCPUs, however the only case when
			 * KVM *must* flush the TLB is when the target L2 vCPU keeps
			 * running on the same L1 vCPU from the moment of the request until
			 * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
			 * cases, e.g. when the target L2 vCPU migrates to a different L1
			 * vCPU or when the corresponding L1 vCPU temporarily switches to a
			 * different L2 vCPU while the request is being processed.
			 */
			if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
				continue;

			if (!all_cpus &&
			    !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
						    sparse_banks))
				continue;

			__set_bit(i, vcpu_mask);
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	}

ret_success:
	/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
	return (u64)HV_STATUS_SUCCESS |
		((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

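/*
 * Note (illustrative): the return value above packs the status code into
 * bits 15:0 and 'reps completed' into the rep count field (shifted by
 * HV_HYPERCALL_REP_COMP_OFFSET).  E.g. with hc->rep_cnt == 4 and
 * HV_STATUS_SUCCESS (0), the guest sees 0x0000000400000000.
 */
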
static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
				    u64 *sparse_banks, u64 valid_bank_mask)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (sparse_banks &&
		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
					    valid_bank_mask, sparse_banks))
			continue;

		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}

static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 valid_bank_mask;
	u32 vector;
	bool all_cpus;

	if (!lapic_in_kernel(vcpu))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	if (hc->code == HVCALL_SEND_IPI) {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(hc->ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = hc->outgpa;
			vector = (u32)hc->ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
						    sizeof(send_ipi_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		} else {
			send_ipi_ex.vector = (u32)hc->ingpa;
			send_ipi_ex.vp_set.format = hc->outgpa;
			send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
		}

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (all_cpus)
			goto check_and_send_ipi;

		if (!hc->var_cnt)
			goto ret_success;

		if (!hc->fast)
			hc->data_offset = offsetof(struct hv_send_ipi_ex,
						   vp_set.bank_contents);
		else
			hc->consumed_xmm_halves = 1;

		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

check_and_send_ipi:
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	if (all_cpus)
		kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
	else
		kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);

ret_success:
	return HV_STATUS_SUCCESS;
}

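/*
 * Note (illustrative) on the 'fast' HvSendIpi variant handled above: the
 * vector is taken from the low 32 bits of the first input register
 * (hc->ingpa, upper bits must be zero) and the 64-bit CPU mask from the
 * second one (hc->outgpa); the slow variant instead reads struct hv_send_ipi
 * from guest memory.
 */
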
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_cpuid_entry2 *entry;

	vcpu->arch.hyperv_enabled = hyperv_enabled;

	if (!hv_vcpu) {
		/*
		 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
		 * enabled in CPUID.
		 */
		WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
		return;
	}

	memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));

	if (!vcpu->arch.hyperv_enabled)
		return;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.features_eax = entry->eax;
		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
		hv_vcpu->cpuid_cache.features_edx = entry->edx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	if (entry) {
		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
	if (entry)
		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.nested_eax = entry->eax;
		hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
	}
}

int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int ret = 0;

	if (!to_hv_vcpu(vcpu)) {
		if (enforce) {
			ret = kvm_hv_vcpu_init(vcpu);
			if (ret)
				return ret;
		} else {
			return 0;
		}
	}

	hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->enforce_cpuid = enforce;

	return ret;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_hypercall(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}

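/*
 * Illustrative example: for a 32-bit (non long mode) caller a result of
 * 0x1122334455667788 is returned as EDX = 0x11223344, EAX = 0x55667788;
 * a 64-bit caller gets the whole value back in RAX.
 */
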
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	u32 tlb_lock_count = 0;
	int ret;

	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
			   &tlb_lock_count, sizeof(tlb_lock_count)))
		result = HV_STATUS_INVALID_HYPERCALL_INPUT;

	trace_kvm_hv_hypercall_done(result);
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;

	ret = kvm_skip_emulated_instruction(vcpu);

	if (tlb_lock_count)
		kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);

	return ret;
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;

	if (unlikely(!hc->fast)) {
		int ret;
		gpa_t gpa = hc->ingpa;

		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known usecases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (hc->ingpa & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd);
	return HV_STATUS_SUCCESS;
}

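/*
 * Note (illustrative) on the HvSignalEvent input checked above: bits 31:0
 * carry the connection ID, bits 47:32 the optional "flag number" (which KVM
 * requires to be zero) and the remaining bits are reserved; KVM additionally
 * rejects connection IDs outside KVM_HYPERV_CONN_ID_MASK.
 */
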
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{
	switch (hc->code) {
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
	case HVCALL_SEND_IPI_EX:
		return true;
	}

	return false;
}

static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{
	int reg;

	kvm_fpu_get();
	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
	kvm_fpu_put();
}

static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
	case HVCALL_POST_MESSAGE:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
	case HVCALL_SIGNAL_EVENT:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
	case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so the resulting code
		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	case HVCALL_SEND_IPI_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_SEND_IPI:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_CLUSTER_IPI_RECOMMENDED;
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		return hv_vcpu->cpuid_cache.features_ebx &
			HV_ENABLE_EXTENDED_HYPERCALLS;
	default:
		break;
	}

	return true;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_hv_hcall hc;
	u64 ret = HV_STATUS_SUCCESS;

	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when issued from
	 * non-zero CPL or from real mode.
	 */
	if (kvm_x86_call(get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

#ifdef CONFIG_X86_64
	if (is_64_bit_hypercall(vcpu)) {
		hc.param = kvm_rcx_read(vcpu);
		hc.ingpa = kvm_rdx_read(vcpu);
		hc.outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
			    (kvm_rax_read(vcpu) & 0xffffffff);
		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			    (kvm_rcx_read(vcpu) & 0xffffffff);
		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			     (kvm_rsi_read(vcpu) & 0xffffffff);
	}

	hc.code = hc.param & 0xffff;
	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);

	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);

	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}

	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
		goto hypercall_complete;
	}

	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}

		kvm_hv_hypercall_read_xmm(&hc);
	}

	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		goto hypercall_userspace_exit;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		goto hypercall_userspace_exit;
	}
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		goto hypercall_userspace_exit;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);

hypercall_userspace_exit:
	vcpu->run->exit_reason = KVM_EXIT_HYPERV;
	vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
	vcpu->run->hyperv.u.hcall.input = hc.param;
	vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
	vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
	vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
	return 0;
}

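/*
 * Illustrative sketch (not part of the upstream file): a VMM that receives
 * the KVM_EXIT_HYPERV exit prepared above might handle it roughly as
 *
 *	if (run->exit_reason == KVM_EXIT_HYPERV &&
 *	    run->hyperv.type == KVM_EXIT_HYPERV_HCALL) {
 *		u16 code = run->hyperv.u.hcall.input & 0xffff;
 *
 *		// handle e.g. HvPostMessage using params[0]/params[1] ...
 *		run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
 *	}
 *
 * before re-entering the vCPU, at which point
 * kvm_hv_hypercall_complete_userspace() picks up the result.
 */
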
void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}

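/*
 * Illustrative sketch (not part of the upstream file): userspace wires a
 * connection ID to an eventfd with the KVM_HYPERV_EVENTFD VM ioctl, roughly
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 0x1234,
 *		.fd      = event_fd,
 *		.flags   = 0,
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 *
 * after which a guest HvSignalEvent(0x1234) lands in kvm_hvcall_signal_event()
 * and signals 'event_fd' without a userspace exit.
 */
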
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions so let's use
			 * this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;
			ent->eax |= HV_ACCESS_TSC_INVARIANT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;
			ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;

			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;

			/*
			 * Direct Synthetic timers only make sense with in-kernel
			 * LAPIC
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
			/*
			 * Default number of spinlock retry attempts, matches
			 * HyperV 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * HyperV 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
			ent->eax |= HV_X64_NESTED_MSR_BITMAP;
			ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}