GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/nvhe/pkvm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used in protected mode
 * for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

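	/*
	 * With FEAT_EVT, TID4 traps the cache ID registers but not CTR_EL0,
	 * which is only safe to leave untrapped when the VM's CTR_EL0 matches
	 * the hardware value; otherwise fall back to TID2, which traps
	 * CTR_EL0 as well.
	 */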
	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_has_ptrauth(vcpu))
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.hcr_el2;

	/* No support for AArch32. */
	val |= HCR_RW;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	vcpu->arch.hcr_el2 = val;
}

static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.mdcr_el2;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
		val |= MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}

/*
 * Check that cpu features that are neither trapped nor supported are not
 * enabled for protected VMs.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* No AArch32 support for protected guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}

/*
 * Initialize trap register values in protected mode.
 */
static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret;

	vcpu->arch.mdcr_el2 = 0;

	pkvm_vcpu_reset_hcr(vcpu);

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

		/* Trust the host for non-protected vcpu features. */
		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
		return 0;
	}

	ret = pkvm_check_pvm_cpu_features(vcpu);
	if (ret)
		return ret;

	pvm_init_traps_hcr(vcpu);
	pvm_init_traps_mdcr(vcpu);
	vcpu_set_hcrx(vcpu);

	return 0;
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

/*
 * Marks a reserved but not yet used entry in the VM table.
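 * The value is never a valid, page-aligned pointer, so it cannot collide
 * with either a free slot (NULL) or a fully initialized VM.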
 */
#define RESERVED_ENTRY ((void *)0xa110ca7ed)

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
 * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * A table that tracks all VMs in protected mode.
 * Allocated during hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	/* A reserved entry doesn't represent an initialized VM. */
	if (unlikely(vm_table[idx] == RESERVED_ENTRY))
		return NULL;

	return vm_table[idx];
}

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	if (!hyp_vcpu)
		goto unlock;

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
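	/*
	 * Holding a reference on the VM's hyp page keeps it from being torn
	 * down while this vCPU is loaded; it is dropped in pkvm_put_hyp_vcpu().
	 */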
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
	return hyp_vcpu;
}

void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (hyp_vm)
		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm;
}

void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
{
	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

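/*
 * Like get_pkvm_hyp_vm(), but only returns non-protected ("np") VMs; for a
 * protected VM the reference is dropped again and NULL is returned.
 */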
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

	if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
		put_pkvm_hyp_vm(hyp_vm);
		hyp_vm = NULL;
	}

	return hyp_vm;
}

static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
	struct kvm *kvm = &hyp_vm->kvm;
	unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	/* CTR_EL0 is always under host control, even for protected VMs. */
	hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;

	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
		set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);

	/* No restrictions for non-protected VMs. */
	if (!kvm_vm_is_protected(kvm)) {
		hyp_vm->kvm.arch.flags = host_arch_flags;

		bitmap_copy(kvm->arch.vcpu_features,
			    host_kvm->arch.vcpu_features,
			    KVM_VCPU_MAX_FEATURES);

		if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
			hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;

		return;
	}

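	/*
	 * Protected VMs: start from an empty set and allow only the features
	 * explicitly handled below.
	 */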
	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
	}

	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	void *sve_state;

	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
		return;

	sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
	hyp_unpin_shared_mem(sve_state,
			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];

		if (!hyp_vcpu)
			continue;

		unpin_host_vcpu(hyp_vcpu->host_vcpu);
		unpin_host_sve_state(hyp_vcpu);
	}
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus, pkvm_handle_t handle)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx = vm_handle_to_idx(handle);

	hyp_vm->kvm.arch.pkvm.handle = handle;

	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
	hyp_vm->kvm.arch.pkvm.is_created = true;
	hyp_vm->kvm.arch.flags = 0;
	pkvm_init_features_from_host(hyp_vm, host_kvm);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->vtcr = host_mmu.arch.mmu.vtcr;
	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;
}

static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	unsigned int sve_max_vl;
	size_t sve_state_size;
	void *sve_state;
	int ret = 0;

	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
		return 0;
	}

	/* Limit guest vector length to the maximum supported by the host. */
	sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
	sve_state_size = sve_state_size_from_vl(sve_max_vl);
	sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));

	if (!sve_state || !sve_state_size) {
		ret = -EINVAL;
		goto err;
	}

	ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
	if (ret)
		goto err;

	vcpu->arch.sve_state = sve_state;
	vcpu->arch.sve_max_vl = sve_max_vl;

	return 0;
err:
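	/* SVE init failed: clear the feature bit so the VM is not treated as SVE-capable. */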
	clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
	return ret;
}

static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(void)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Reserve a VM table entry.
 *
 * Return a unique handle to the VM on success,
 * negative error code on failure.
 */
static int allocate_vm_table_entry(void)
{
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry();
	if (unlikely(idx < 0))
		return idx;

	vm_table[idx] = RESERVED_ENTRY;

	return idx;
}

static int __insert_vm_table_entry(pkvm_handle_t handle,
				   struct pkvm_hyp_vm *hyp_vm)
{
	unsigned int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = vm_handle_to_idx(handle);
	if (unlikely(idx >= KVM_MAX_PVMS))
		return -EINVAL;

	if (unlikely(vm_table[idx] != RESERVED_ENTRY))
		return -EINVAL;

	vm_table[idx] = hyp_vm;

	return 0;
}

/*
 * Insert a pointer to the initialized VM into the VM table.
 *
 * Return 0 on success, or negative error code on failure.
 */
static int insert_vm_table_entry(pkvm_handle_t handle,
				 struct pkvm_hyp_vm *hyp_vm)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = __insert_vm_table_entry(handle, hyp_vm);
	hyp_spin_unlock(&vm_table_lock);

	return ret;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
			size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Reserves an entry in the hypervisor for a new VM in protected mode.
 *
 * Return a unique handle to the VM on success, negative error code on failure.
 */
int __pkvm_reserve_vm(void)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = allocate_vm_table_entry();
	hyp_spin_unlock(&vm_table_lock);

	if (ret < 0)
		return ret;

	return idx_to_vm_handle(ret);
}

/*
 * Removes a reserved entry, but only if it hasn't been used yet.
 * Otherwise, the VM needs to be destroyed.
 */
void __pkvm_unreserve_vm(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(!vm_table))
		return;

	hyp_spin_lock(&vm_table_lock);
	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
		remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);
}

/*
 * Initialize the hypervisor copy of the VM state using host-donated memory.
 *
 * Unmap the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	pkvm_handle_t handle;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

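	/* The host must have reserved a handle with __pkvm_reserve_vm() beforehand. */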
	handle = READ_ONCE(host_kvm->arch.pkvm.handle);
	if (unlikely(handle < HANDLE_OFFSET)) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus, handle);

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_mappings;

	/* Must be called last since this publishes the VM. */
	ret = insert_vm_table_entry(handle, hyp_vm);
	if (ret)
		goto err_remove_mappings;

	return 0;

err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the vCPU state using host-donated memory.
 *
 * handle: The hypervisor handle for the vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
	if (ret)
		goto unlock;

	idx = hyp_vcpu->vcpu.vcpu_idx;
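	/* Don't trust the host-provided index: it must be in range and not already in use. */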
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	if (hyp_vm->vcpus[idx]) {
		ret = -EINVAL;
		goto unlock;
	}

	hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
	return ret;
}

static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

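	/* Record each (now zeroed) page in the memcache so the host can reclaim it. */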
	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc, *stage2_mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

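	/* A non-zero refcount means the VM is still in use, e.g. a vCPU is still loaded. */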
	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
	reclaim_pgtable_pages(hyp_vm, stage2_mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc;

		if (!hyp_vcpu)
			continue;

		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);

			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}