GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/svm/sev.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <uapi/linux/sev-guest.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/sev.h>

#include "mmu.h"
#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define GHCB_VERSION_MAX	2ULL
#define GHCB_VERSION_MIN	1ULL

#define GHCB_HV_FT_SUPPORTED	(GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)

/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);

/* enable/disable SEV-SNP support */
static bool sev_snp_enabled = true;
module_param_named(sev_snp, sev_snp_enabled, bool, 0444);

/* enable/disable SEV-ES DebugSwap support */
static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
static u64 sev_supported_vmsa_features;

static unsigned int nr_ciphertext_hiding_asids;
module_param_named(ciphertext_hiding_asids, nr_ciphertext_hiding_asids, uint, 0444);
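
/*
 * Editorial example, not part of the original source: the parameters above
 * are declared with mode 0444 (read-only at runtime), so they are normally
 * set when the kvm-amd module is loaded. A hypothetical modprobe
 * configuration might look like:
 *
 *	# /etc/modprobe.d/kvm-amd.conf
 *	options kvm_amd sev=1 sev_es=1 sev_snp=1 debug_swap=1
 *
 * Parameter names follow the module_param_named() declarations above.
 */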

#define AP_RESET_HOLD_NONE		0
#define AP_RESET_HOLD_NAE_EVENT		1
#define AP_RESET_HOLD_MSR_PROTO		2

/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
#define SNP_POLICY_MASK_API_MINOR	GENMASK_ULL(7, 0)
#define SNP_POLICY_MASK_API_MAJOR	GENMASK_ULL(15, 8)
#define SNP_POLICY_MASK_SMT		BIT_ULL(16)
#define SNP_POLICY_MASK_RSVD_MBO	BIT_ULL(17)
#define SNP_POLICY_MASK_DEBUG		BIT_ULL(19)
#define SNP_POLICY_MASK_SINGLE_SOCKET	BIT_ULL(20)

#define SNP_POLICY_MASK_VALID		(SNP_POLICY_MASK_API_MINOR	| \
					 SNP_POLICY_MASK_API_MAJOR	| \
					 SNP_POLICY_MASK_SMT		| \
					 SNP_POLICY_MASK_RSVD_MBO	| \
					 SNP_POLICY_MASK_DEBUG		| \
					 SNP_POLICY_MASK_SINGLE_SOCKET)
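
/*
 * Editorial example, not part of the original source: a hypothetical guest
 * policy requesting API version 1.51, with the reserved must-be-one bit set
 * and SMT permitted, could be composed from the masks above as:
 *
 *	u64 policy = FIELD_PREP(SNP_POLICY_MASK_API_MAJOR, 1) |
 *		     FIELD_PREP(SNP_POLICY_MASK_API_MINOR, 51) |
 *		     SNP_POLICY_MASK_RSVD_MBO | SNP_POLICY_MASK_SMT;
 *
 * Bits outside SNP_POLICY_MASK_VALID are not considered valid policy bits.
 */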

#define INITIAL_VMSA_GPA 0xFFFFFFFFF000

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned int max_sev_es_asid;
static unsigned int min_sev_es_asid;
static unsigned int max_snp_asid;
static unsigned int min_snp_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

static int snp_decommission_context(struct kvm *kvm);

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
	int ret, error = 0;
	unsigned int asid;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	/* SNP firmware requires use of WBINVD for ASID recycling. */
	wbinvd_on_all_cpus();

	if (sev_snp_enabled)
		ret = sev_do_cmd(SEV_CMD_SNP_DF_FLUSH, NULL, &error);
	else
		ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV%s: DF_FLUSH failed, ret=%d, error=%#x\n",
		       sev_snp_enabled ? "-SNP" : "", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_sev_info(kvm)->enc_context_owner;
}

static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);

	return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
}

static bool snp_is_secure_tsc_enabled(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
	       !WARN_ON_ONCE(!sev_snp_guest(kvm));
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}

static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
}

static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
{
	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
	 */
	unsigned int min_asid, max_asid, asid;
	bool retry = true;
	int ret;

	if (vm_type == KVM_X86_SNP_VM) {
		min_asid = min_snp_asid;
		max_asid = max_snp_asid;
	} else if (sev->es_active) {
		min_asid = min_sev_es_asid;
		max_asid = max_sev_es_asid;
	} else {
		min_asid = min_sev_asid;
		max_asid = max_sev_asid;
	}

	/*
	 * The min ASID can end up larger than the max if basic SEV support is
	 * effectively disabled by disallowing use of ASIDs for SEV guests.
	 * Similarly for SEV-ES guests the min ASID can end up larger than the
	 * max when ciphertext hiding is enabled, effectively disabling SEV-ES
	 * support.
	 */
	if (min_asid > max_asid)
		return -ENOTTY;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = sev_misc_cg_try_charge(sev);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	sev->asid = asid;
	return 0;
e_uncharge:
	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}
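
/*
 * Editorial example, hypothetical numbers only: if firmware were to report
 * min_sev_asid = 100 and max_sev_asid = 509, plain SEV guests would allocate
 * ASIDs from 100..509, while SEV-ES and SEV-SNP guests would share 1..99
 * (with that range split further between ES and SNP when ciphertext hiding
 * reserves dedicated SNP ASIDs).
 */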

static unsigned int sev_get_asid(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->asid;
}

static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu_ptr(&svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

/*
 * Transition a page to hypervisor-owned/shared state in the RMP table. This
 * should not fail under normal conditions, but leak the page should that
 * happen since it will no longer be usable by the host due to RMP protections.
 */
static int kvm_rmp_make_shared(struct kvm *kvm, u64 pfn, enum pg_level level)
{
	if (KVM_BUG_ON(rmp_make_shared(pfn, level), kvm)) {
		snp_leak_pages(pfn, page_level_size(level) >> PAGE_SHIFT);
		return -EIO;
	}

	return 0;
}

/*
 * Certain page-states, such as Pre-Guest and Firmware pages (as documented
 * in Chapter 5 of the SEV-SNP Firmware ABI under "Page States") cannot be
 * directly transitioned back to normal/hypervisor-owned state via RMPUPDATE
 * unless they are reclaimed first.
 *
 * Until they are reclaimed and subsequently transitioned via RMPUPDATE, they
 * might not be usable by the host due to being set as immutable or still
 * being associated with a guest ASID.
 *
 * Bug the VM and leak the page if reclaim fails, or if the RMP entry can't be
 * converted back to shared, as the page is no longer usable due to RMP
 * protections, and it's infeasible for the guest to continue on.
 */
static int snp_page_reclaim(struct kvm *kvm, u64 pfn)
{
	struct sev_data_snp_page_reclaim data = {0};
	int fw_err, rc;

	data.paddr = __sme_set(pfn << PAGE_SHIFT);
	rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &fw_err);
	if (KVM_BUG(rc, kvm, "Failed to reclaim PFN %llx, rc %d fw_err %d", pfn, rc, fw_err)) {
		snp_leak_pages(pfn, 1);
		return -EIO;
	}

	if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K))
		return -EIO;

	return rc;
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

/*
 * This sets up bounce buffers/firmware pages to handle SNP Guest Request
 * messages (e.g. attestation requests). See "SNP Guest Request" in the GHCB
 * 2.0 specification for more details.
 *
 * Technically, when an SNP Guest Request is issued, the guest will provide its
 * own request/response pages, which could in theory be passed along directly
 * to firmware rather than using bounce pages. However, these pages would need
 * special care:
 *
 * - Both pages are from shared guest memory, so they need to be protected
 *   from migration/etc. occurring while firmware reads/writes to them. At a
 *   minimum, this requires elevating the ref counts and potentially needing
 *   an explicit pinning of the memory. This places additional restrictions
 *   on what type of memory backends userspace can use for shared guest
 *   memory since there is some reliance on using refcounted pages.
 *
 * - The response page needs to be switched to Firmware-owned[1] state
 *   before the firmware can write to it, which can lead to potential
 *   host RMP #PFs if the guest is misbehaved and hands the host a
 *   guest page that KVM might write to for other reasons (e.g. virtio
 *   buffers/etc.).
 *
 * Both of these issues can be avoided completely by using separately-allocated
 * bounce pages for both the request/response pages and passing those to
 * firmware instead. So that's what is being set up here.
 *
 * Guest requests rely on message sequence numbers to ensure requests are
 * issued to firmware in the order the guest issues them, so concurrent guest
 * requests generally shouldn't happen. But a misbehaved guest could issue
 * concurrent guest requests in theory, so a mutex is used to serialize
 * access to the bounce buffers.
 *
 * [1] See the "Page States" section of the SEV-SNP Firmware ABI for more
 *     details on Firmware-owned pages, along with "RMP and VMPL Access Checks"
 *     in the APM for details on the related RMP restrictions.
 */
static int snp_guest_req_init(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
	struct page *req_page;

	req_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!req_page)
		return -ENOMEM;

	sev->guest_resp_buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sev->guest_resp_buf) {
		__free_page(req_page);
		return -EIO;
	}

	sev->guest_req_buf = page_address(req_page);
	mutex_init(&sev->guest_req_mutex);

	return 0;
}

static void snp_guest_req_cleanup(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	if (sev->guest_resp_buf)
		snp_free_firmware_page(sev->guest_resp_buf);

	if (sev->guest_req_buf)
		__free_page(virt_to_page(sev->guest_req_buf));

	sev->guest_req_buf = NULL;
	sev->guest_resp_buf = NULL;
}
429
430
static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
431
struct kvm_sev_init *data,
432
unsigned long vm_type)
433
{
434
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
435
struct sev_platform_init_args init_args = {0};
436
bool es_active = vm_type != KVM_X86_SEV_VM;
437
bool snp_active = vm_type == KVM_X86_SNP_VM;
438
u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
439
int ret;
440
441
if (kvm->created_vcpus)
442
return -EINVAL;
443
444
if (data->flags)
445
return -EINVAL;
446
447
if (!snp_active)
448
valid_vmsa_features &= ~SVM_SEV_FEAT_SECURE_TSC;
449
450
if (data->vmsa_features & ~valid_vmsa_features)
451
return -EINVAL;
452
453
if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version))
454
return -EINVAL;
455
456
/*
457
* KVM supports the full range of mandatory features defined by version
458
* 2 of the GHCB protocol, so default to that for SEV-ES guests created
459
* via KVM_SEV_INIT2 (KVM_SEV_INIT forces version 1).
460
*/
461
if (es_active && !data->ghcb_version)
462
data->ghcb_version = 2;
463
464
if (snp_active && data->ghcb_version < 2)
465
return -EINVAL;
466
467
if (unlikely(sev->active))
468
return -EINVAL;
469
470
sev->active = true;
471
sev->es_active = es_active;
472
sev->vmsa_features = data->vmsa_features;
473
sev->ghcb_version = data->ghcb_version;
474
475
if (snp_active)
476
sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE;
477
478
ret = sev_asid_new(sev, vm_type);
479
if (ret)
480
goto e_no_asid;
481
482
init_args.probe = false;
483
ret = sev_platform_init(&init_args);
484
if (ret)
485
goto e_free_asid;
486
487
if (!zalloc_cpumask_var(&sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
488
ret = -ENOMEM;
489
goto e_free_asid;
490
}
491
492
/* This needs to happen after SEV/SNP firmware initialization. */
493
if (snp_active) {
494
ret = snp_guest_req_init(kvm);
495
if (ret)
496
goto e_free;
497
}
498
499
INIT_LIST_HEAD(&sev->regions_list);
500
INIT_LIST_HEAD(&sev->mirror_vms);
501
sev->need_init = false;
502
503
kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
504
505
return 0;
506
507
e_free:
508
free_cpumask_var(sev->have_run_cpus);
509
e_free_asid:
510
argp->error = init_args.error;
511
sev_asid_free(sev);
512
sev->asid = 0;
513
e_no_asid:
514
sev->vmsa_features = 0;
515
sev->es_active = false;
516
sev->active = false;
517
return ret;
518
}
519
520
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
521
{
522
struct kvm_sev_init data = {
523
.vmsa_features = 0,
524
.ghcb_version = 0,
525
};
526
unsigned long vm_type;
527
528
if (kvm->arch.vm_type != KVM_X86_DEFAULT_VM)
529
return -EINVAL;
530
531
vm_type = (argp->id == KVM_SEV_INIT ? KVM_X86_SEV_VM : KVM_X86_SEV_ES_VM);
532
533
/*
534
* KVM_SEV_ES_INIT has been deprecated by KVM_SEV_INIT2, so it will
535
* continue to only ever support the minimal GHCB protocol version.
536
*/
537
if (vm_type == KVM_X86_SEV_ES_VM)
538
data.ghcb_version = GHCB_VERSION_MIN;
539
540
return __sev_guest_init(kvm, argp, &data, vm_type);
541
}
542
543
static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
544
{
545
struct kvm_sev_init data;
546
547
if (!to_kvm_sev_info(kvm)->need_init)
548
return -EINVAL;
549
550
if (kvm->arch.vm_type != KVM_X86_SEV_VM &&
551
kvm->arch.vm_type != KVM_X86_SEV_ES_VM &&
552
kvm->arch.vm_type != KVM_X86_SNP_VM)
553
return -EINVAL;
554
555
if (copy_from_user(&data, u64_to_user_ptr(argp->data), sizeof(data)))
556
return -EFAULT;
557
558
return __sev_guest_init(kvm, argp, &data, kvm->arch.vm_type);
559
}
560
561
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
562
{
563
unsigned int asid = sev_get_asid(kvm);
564
struct sev_data_activate activate;
565
int ret;
566
567
/* activate ASID on the given handle */
568
activate.handle = handle;
569
activate.asid = asid;
570
ret = sev_guest_activate(&activate, error);
571
572
return ret;
573
}
574
575
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
576
{
577
CLASS(fd, f)(fd);
578
579
if (fd_empty(f))
580
return -EBADF;
581
582
return sev_issue_cmd_external_user(fd_file(f), id, data, error);
583
}
584
585
static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
586
{
587
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
588
589
return __sev_issue_cmd(sev->fd, id, data, error);
590
}
591
592
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
593
{
594
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
595
struct sev_data_launch_start start;
596
struct kvm_sev_launch_start params;
597
void *dh_blob, *session_blob;
598
int *error = &argp->error;
599
int ret;
600
601
if (!sev_guest(kvm))
602
return -ENOTTY;
603
604
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
605
return -EFAULT;
606
607
memset(&start, 0, sizeof(start));
608
609
dh_blob = NULL;
610
if (params.dh_uaddr) {
611
dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
612
if (IS_ERR(dh_blob))
613
return PTR_ERR(dh_blob);
614
615
start.dh_cert_address = __sme_set(__pa(dh_blob));
616
start.dh_cert_len = params.dh_len;
617
}
618
619
session_blob = NULL;
620
if (params.session_uaddr) {
621
session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
622
if (IS_ERR(session_blob)) {
623
ret = PTR_ERR(session_blob);
624
goto e_free_dh;
625
}
626
627
start.session_address = __sme_set(__pa(session_blob));
628
start.session_len = params.session_len;
629
}
630
631
start.handle = params.handle;
632
start.policy = params.policy;
633
634
/* create memory encryption context */
635
ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
636
if (ret)
637
goto e_free_session;
638
639
/* Bind ASID to this guest */
640
ret = sev_bind_asid(kvm, start.handle, error);
641
if (ret) {
642
sev_decommission(start.handle);
643
goto e_free_session;
644
}
645
646
/* return handle to userspace */
647
params.handle = start.handle;
648
if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))) {
649
sev_unbind_asid(kvm, start.handle);
650
ret = -EFAULT;
651
goto e_free_session;
652
}
653
654
sev->policy = params.policy;
655
sev->handle = start.handle;
656
sev->fd = argp->sev_fd;
657
658
e_free_session:
659
kfree(session_blob);
660
e_free_dh:
661
kfree(dh_blob);
662
return ret;
663
}
664
665
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
666
unsigned long ulen, unsigned long *n,
667
unsigned int flags)
668
{
669
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
670
unsigned long npages, size;
671
int npinned;
672
unsigned long locked, lock_limit;
673
struct page **pages;
674
unsigned long first, last;
675
int ret;
676
677
lockdep_assert_held(&kvm->lock);
678
679
if (ulen == 0 || uaddr + ulen < uaddr)
680
return ERR_PTR(-EINVAL);
681
682
/* Calculate number of pages. */
683
first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
684
last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
685
npages = (last - first + 1);
686
687
locked = sev->pages_locked + npages;
688
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
689
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
690
pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
691
return ERR_PTR(-ENOMEM);
692
}
693
694
if (WARN_ON_ONCE(npages > INT_MAX))
695
return ERR_PTR(-EINVAL);
696
697
/* Avoid using vmalloc for smaller buffers. */
698
size = npages * sizeof(struct page *);
699
if (size > PAGE_SIZE)
700
pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
701
else
702
pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
703
704
if (!pages)
705
return ERR_PTR(-ENOMEM);
706
707
/* Pin the user virtual address. */
708
npinned = pin_user_pages_fast(uaddr, npages, flags, pages);
709
if (npinned != npages) {
710
pr_err("SEV: Failure locking %lu pages.\n", npages);
711
ret = -ENOMEM;
712
goto err;
713
}
714
715
*n = npages;
716
sev->pages_locked = locked;
717
718
return pages;
719
720
err:
721
if (npinned > 0)
722
unpin_user_pages(pages, npinned);
723
724
kvfree(pages);
725
return ERR_PTR(ret);
726
}
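
/*
 * Editorial example, hypothetical values only: for uaddr = 0x1000ff0 and
 * ulen = 0x20, the first touched pfn is 0x1000 and the last is 0x1001
 * (uaddr + ulen - 1 lands on the next page), so npages = last - first + 1 = 2
 * even though only 32 bytes were requested.
 */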
727
728
static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
729
unsigned long npages)
730
{
731
unpin_user_pages(pages, npages);
732
kvfree(pages);
733
to_kvm_sev_info(kvm)->pages_locked -= npages;
734
}
735
736
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
737
{
738
uint8_t *page_virtual;
739
unsigned long i;
740
741
if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
742
pages == NULL)
743
return;
744
745
for (i = 0; i < npages; i++) {
746
page_virtual = kmap_local_page(pages[i]);
747
clflush_cache_range(page_virtual, PAGE_SIZE);
748
kunmap_local(page_virtual);
749
cond_resched();
750
}
751
}
752
753
static void sev_writeback_caches(struct kvm *kvm)
754
{
755
/*
756
* Ensure that all dirty guest tagged cache entries are written back
757
* before releasing the pages back to the system for use. CLFLUSH will
758
* not do this without SME_COHERENT, and flushing many cache lines
759
* individually is slower than blasting WBINVD for large VMs, so issue
760
* WBNOINVD (or WBINVD if the "no invalidate" variant is unsupported)
761
* on CPUs that have done VMRUN, i.e. may have dirtied data using the
762
* VM's ASID.
763
*
764
* For simplicity, never remove CPUs from the bitmap. Ideally, KVM
765
* would clear the mask when flushing caches, but doing so requires
766
* serializing multiple calls and having responding CPUs (to the IPI)
767
* mark themselves as still running if they are running (or about to
768
* run) a vCPU for the VM.
769
*
770
* Note, the caller is responsible for ensuring correctness if the mask
771
* can be modified, e.g. if a CPU could be doing VMRUN.
772
*/
773
wbnoinvd_on_cpus_mask(to_kvm_sev_info(kvm)->have_run_cpus);
774
}
775
776
static unsigned long get_num_contig_pages(unsigned long idx,
777
struct page **inpages, unsigned long npages)
778
{
779
unsigned long paddr, next_paddr;
780
unsigned long i = idx + 1, pages = 1;
781
782
/* find the number of contiguous pages starting from idx */
783
paddr = __sme_page_pa(inpages[idx]);
784
while (i < npages) {
785
next_paddr = __sme_page_pa(inpages[i++]);
786
if ((paddr + PAGE_SIZE) == next_paddr) {
787
pages++;
788
paddr = next_paddr;
789
continue;
790
}
791
break;
792
}
793
794
return pages;
795
}
796
797
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
798
{
799
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
800
struct kvm_sev_launch_update_data params;
801
struct sev_data_launch_update_data data;
802
struct page **inpages;
803
int ret;
804
805
if (!sev_guest(kvm))
806
return -ENOTTY;
807
808
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
809
return -EFAULT;
810
811
vaddr = params.uaddr;
812
size = params.len;
813
vaddr_end = vaddr + size;
814
815
/* Lock the user memory. */
816
inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
817
if (IS_ERR(inpages))
818
return PTR_ERR(inpages);
819
820
/*
821
* Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
822
* place; the cache may contain the data that was written unencrypted.
823
*/
824
sev_clflush_pages(inpages, npages);
825
826
data.reserved = 0;
827
data.handle = to_kvm_sev_info(kvm)->handle;
828
829
for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
830
int offset, len;
831
832
/*
833
* If the user buffer is not page-aligned, calculate the offset
834
* within the page.
835
*/
836
offset = vaddr & (PAGE_SIZE - 1);
837
838
/* Calculate the number of pages that can be encrypted in one go. */
839
pages = get_num_contig_pages(i, inpages, npages);
840
841
len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
842
843
data.len = len;
844
data.address = __sme_page_pa(inpages[i]) + offset;
845
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
846
if (ret)
847
goto e_unpin;
848
849
size -= len;
850
next_vaddr = vaddr + len;
851
}
852
853
e_unpin:
854
/* content of memory is updated, mark pages dirty */
855
for (i = 0; i < npages; i++) {
856
set_page_dirty_lock(inpages[i]);
857
mark_page_accessed(inpages[i]);
858
}
859
/* unlock the user pages */
860
sev_unpin_memory(kvm, inpages, npages);
861
return ret;
862
}
863
864
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
865
{
866
struct kvm_vcpu *vcpu = &svm->vcpu;
867
struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
868
struct sev_es_save_area *save = svm->sev_es.vmsa;
869
struct xregs_state *xsave;
870
const u8 *s;
871
u8 *d;
872
int i;
873
874
/* Check some debug related fields before encrypting the VMSA */
875
if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
876
return -EINVAL;
877
878
/*
879
* SEV-ES will use a VMSA that is pointed to by the VMCB, not
880
* the traditional VMSA that is part of the VMCB. Copy the
881
* traditional VMSA as it has been built so far (in prep
882
* for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
883
*/
884
memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
885
886
/* Sync registgers */
887
save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
888
save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
889
save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
890
save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
891
save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
892
save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
893
save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
894
save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
895
#ifdef CONFIG_X86_64
896
save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
897
save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
898
save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
899
save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
900
save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
901
save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
902
save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
903
save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
904
#endif
905
save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
906
907
/* Sync some non-GPR registers before encrypting */
908
save->xcr0 = svm->vcpu.arch.xcr0;
909
save->pkru = svm->vcpu.arch.pkru;
910
save->xss = svm->vcpu.arch.ia32_xss;
911
save->dr6 = svm->vcpu.arch.dr6;
912
913
save->sev_features = sev->vmsa_features;
914
915
/*
916
* Skip FPU and AVX setup with KVM_SEV_ES_INIT to avoid
917
* breaking older measurements.
918
*/
919
if (vcpu->kvm->arch.vm_type != KVM_X86_DEFAULT_VM) {
920
xsave = &vcpu->arch.guest_fpu.fpstate->regs.xsave;
921
save->x87_dp = xsave->i387.rdp;
922
save->mxcsr = xsave->i387.mxcsr;
923
save->x87_ftw = xsave->i387.twd;
924
save->x87_fsw = xsave->i387.swd;
925
save->x87_fcw = xsave->i387.cwd;
926
save->x87_fop = xsave->i387.fop;
927
save->x87_ds = 0;
928
save->x87_cs = 0;
929
save->x87_rip = xsave->i387.rip;
930
931
for (i = 0; i < 8; i++) {
932
/*
933
* The format of the x87 save area is undocumented and
934
* definitely not what you would expect. It consists of
935
* an 8*8 bytes area with bytes 0-7, and an 8*2 bytes
936
* area with bytes 8-9 of each register.
937
*/
938
d = save->fpreg_x87 + i * 8;
939
s = ((u8 *)xsave->i387.st_space) + i * 16;
940
memcpy(d, s, 8);
941
save->fpreg_x87[64 + i * 2] = s[8];
942
save->fpreg_x87[64 + i * 2 + 1] = s[9];
943
}
944
memcpy(save->fpreg_xmm, xsave->i387.xmm_space, 256);
945
946
s = get_xsave_addr(xsave, XFEATURE_YMM);
947
if (s)
948
memcpy(save->fpreg_ymm, s, 256);
949
else
950
memset(save->fpreg_ymm, 0, 256);
951
}
952
953
pr_debug("Virtual Machine Save Area (VMSA):\n");
954
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
955
956
return 0;
957
}
958
959
static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
960
int *error)
961
{
962
struct sev_data_launch_update_vmsa vmsa;
963
struct vcpu_svm *svm = to_svm(vcpu);
964
int ret;
965
966
if (vcpu->guest_debug) {
967
pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
968
return -EINVAL;
969
}
970
971
/* Perform some pre-encryption checks against the VMSA */
972
ret = sev_es_sync_vmsa(svm);
973
if (ret)
974
return ret;
975
976
/*
977
* The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
978
* the VMSA memory content (i.e it will write the same memory region
979
* with the guest's key), so invalidate it first.
980
*/
981
clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);
982
983
vmsa.reserved = 0;
984
vmsa.handle = to_kvm_sev_info(kvm)->handle;
985
vmsa.address = __sme_pa(svm->sev_es.vmsa);
986
vmsa.len = PAGE_SIZE;
987
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
988
if (ret)
989
return ret;
990
991
/*
992
* SEV-ES guests maintain an encrypted version of their FPU
993
* state which is restored and saved on VMRUN and VMEXIT.
994
* Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
995
* do xsave/xrstor on it.
996
*/
997
fpstate_set_confidential(&vcpu->arch.guest_fpu);
998
vcpu->arch.guest_state_protected = true;
999
1000
/*
1001
* SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
1002
* only after setting guest_state_protected because KVM_SET_MSRS allows
1003
* dynamic toggling of LBRV (for performance reason) on write access to
1004
* MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
1005
*/
1006
svm_enable_lbrv(vcpu);
1007
return 0;
1008
}
1009
1010
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
1011
{
1012
struct kvm_vcpu *vcpu;
1013
unsigned long i;
1014
int ret;
1015
1016
if (!sev_es_guest(kvm))
1017
return -ENOTTY;
1018
1019
kvm_for_each_vcpu(i, vcpu, kvm) {
1020
ret = mutex_lock_killable(&vcpu->mutex);
1021
if (ret)
1022
return ret;
1023
1024
ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
1025
1026
mutex_unlock(&vcpu->mutex);
1027
if (ret)
1028
return ret;
1029
}
1030
1031
return 0;
1032
}
1033
1034
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
1035
{
1036
void __user *measure = u64_to_user_ptr(argp->data);
1037
struct sev_data_launch_measure data;
1038
struct kvm_sev_launch_measure params;
1039
void __user *p = NULL;
1040
void *blob = NULL;
1041
int ret;
1042
1043
if (!sev_guest(kvm))
1044
return -ENOTTY;
1045
1046
if (copy_from_user(&params, measure, sizeof(params)))
1047
return -EFAULT;
1048
1049
memset(&data, 0, sizeof(data));
1050
1051
/* User wants to query the blob length */
1052
if (!params.len)
1053
goto cmd;
1054
1055
p = u64_to_user_ptr(params.uaddr);
1056
if (p) {
1057
if (params.len > SEV_FW_BLOB_MAX_SIZE)
1058
return -EINVAL;
1059
1060
blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
1061
if (!blob)
1062
return -ENOMEM;
1063
1064
data.address = __psp_pa(blob);
1065
data.len = params.len;
1066
}
1067
1068
cmd:
1069
data.handle = to_kvm_sev_info(kvm)->handle;
1070
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
1071
1072
/*
1073
* If we only queried the length, the FW responded with the expected data.
1074
*/
1075
if (!params.len)
1076
goto done;
1077
1078
if (ret)
1079
goto e_free_blob;
1080
1081
if (blob) {
1082
if (copy_to_user(p, blob, params.len))
1083
ret = -EFAULT;
1084
}
1085
1086
done:
1087
params.len = data.len;
1088
if (copy_to_user(measure, &params, sizeof(params)))
1089
ret = -EFAULT;
1090
e_free_blob:
1091
kfree(blob);
1092
return ret;
1093
}
1094
1095
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1096
{
1097
struct sev_data_launch_finish data;
1098
1099
if (!sev_guest(kvm))
1100
return -ENOTTY;
1101
1102
data.handle = to_kvm_sev_info(kvm)->handle;
1103
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
1104
}
1105
1106
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
1107
{
1108
struct kvm_sev_guest_status params;
1109
struct sev_data_guest_status data;
1110
int ret;
1111
1112
if (!sev_guest(kvm))
1113
return -ENOTTY;
1114
1115
memset(&data, 0, sizeof(data));
1116
1117
data.handle = to_kvm_sev_info(kvm)->handle;
1118
ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
1119
if (ret)
1120
return ret;
1121
1122
params.policy = data.policy;
1123
params.state = data.state;
1124
params.handle = data.handle;
1125
1126
if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
1127
ret = -EFAULT;
1128
1129
return ret;
1130
}
1131
1132
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
1133
unsigned long dst, int size,
1134
int *error, bool enc)
1135
{
1136
struct sev_data_dbg data;
1137
1138
data.reserved = 0;
1139
data.handle = to_kvm_sev_info(kvm)->handle;
1140
data.dst_addr = dst;
1141
data.src_addr = src;
1142
data.len = size;
1143
1144
return sev_issue_cmd(kvm,
1145
enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
1146
&data, error);
1147
}
1148
1149
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
1150
unsigned long dst_paddr, int sz, int *err)
1151
{
1152
int offset;
1153
1154
/*
1155
* It's safe to read more than we are asked; the caller should ensure that the
1156
* destination has enough space.
1157
*/
1158
offset = src_paddr & 15;
1159
src_paddr = round_down(src_paddr, 16);
1160
sz = round_up(sz + offset, 16);
1161
1162
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
1163
}
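
/*
 * Editorial example, hypothetical values only: for src_paddr = 0x1234 and
 * sz = 8, offset = 4, so the source is rounded down to 0x1230 and the length
 * rounded up to round_up(8 + 4, 16) = 16; the firmware then operates on a
 * fully 16-byte-aligned region and the caller's data starts at offset 4.
 */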
1164
1165
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
1166
void __user *dst_uaddr,
1167
unsigned long dst_paddr,
1168
int size, int *err)
1169
{
1170
struct page *tpage = NULL;
1171
int ret, offset;
1172
1173
/* if inputs are not 16-byte then use intermediate buffer */
1174
if (!IS_ALIGNED(dst_paddr, 16) ||
1175
!IS_ALIGNED(paddr, 16) ||
1176
!IS_ALIGNED(size, 16)) {
1177
tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1178
if (!tpage)
1179
return -ENOMEM;
1180
1181
dst_paddr = __sme_page_pa(tpage);
1182
}
1183
1184
ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
1185
if (ret)
1186
goto e_free;
1187
1188
if (tpage) {
1189
offset = paddr & 15;
1190
if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
1191
ret = -EFAULT;
1192
}
1193
1194
e_free:
1195
if (tpage)
1196
__free_page(tpage);
1197
1198
return ret;
1199
}
1200
1201
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
1202
void __user *vaddr,
1203
unsigned long dst_paddr,
1204
void __user *dst_vaddr,
1205
int size, int *error)
1206
{
1207
struct page *src_tpage = NULL;
1208
struct page *dst_tpage = NULL;
1209
int ret, len = size;
1210
1211
/* If source buffer is not aligned then use an intermediate buffer */
1212
if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
1213
src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
1214
if (!src_tpage)
1215
return -ENOMEM;
1216
1217
if (copy_from_user(page_address(src_tpage), vaddr, size)) {
1218
__free_page(src_tpage);
1219
return -EFAULT;
1220
}
1221
1222
paddr = __sme_page_pa(src_tpage);
1223
}
1224
1225
/*
1226
* If destination buffer or length is not aligned then do read-modify-write:
1227
* - decrypt destination in an intermediate buffer
1228
* - copy the source buffer in an intermediate buffer
1229
* - use the intermediate buffer as source buffer
1230
*/
1231
if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
1232
int dst_offset;
1233
1234
dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
1235
if (!dst_tpage) {
1236
ret = -ENOMEM;
1237
goto e_free;
1238
}
1239
1240
ret = __sev_dbg_decrypt(kvm, dst_paddr,
1241
__sme_page_pa(dst_tpage), size, error);
1242
if (ret)
1243
goto e_free;
1244
1245
/*
1246
* If source is kernel buffer then use memcpy() otherwise
1247
* copy_from_user().
1248
*/
1249
dst_offset = dst_paddr & 15;
1250
1251
if (src_tpage)
1252
memcpy(page_address(dst_tpage) + dst_offset,
1253
page_address(src_tpage), size);
1254
else {
1255
if (copy_from_user(page_address(dst_tpage) + dst_offset,
1256
vaddr, size)) {
1257
ret = -EFAULT;
1258
goto e_free;
1259
}
1260
}
1261
1262
paddr = __sme_page_pa(dst_tpage);
1263
dst_paddr = round_down(dst_paddr, 16);
1264
len = round_up(size, 16);
1265
}
1266
1267
ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
1268
1269
e_free:
1270
if (src_tpage)
1271
__free_page(src_tpage);
1272
if (dst_tpage)
1273
__free_page(dst_tpage);
1274
return ret;
1275
}
1276
1277
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
1278
{
1279
unsigned long vaddr, vaddr_end, next_vaddr;
1280
unsigned long dst_vaddr;
1281
struct page **src_p, **dst_p;
1282
struct kvm_sev_dbg debug;
1283
unsigned long n;
1284
unsigned int size;
1285
int ret;
1286
1287
if (!sev_guest(kvm))
1288
return -ENOTTY;
1289
1290
if (copy_from_user(&debug, u64_to_user_ptr(argp->data), sizeof(debug)))
1291
return -EFAULT;
1292
1293
if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
1294
return -EINVAL;
1295
if (!debug.dst_uaddr)
1296
return -EINVAL;
1297
1298
vaddr = debug.src_uaddr;
1299
size = debug.len;
1300
vaddr_end = vaddr + size;
1301
dst_vaddr = debug.dst_uaddr;
1302
1303
for (; vaddr < vaddr_end; vaddr = next_vaddr) {
1304
int len, s_off, d_off;
1305
1306
/* lock userspace source and destination page */
1307
src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
1308
if (IS_ERR(src_p))
1309
return PTR_ERR(src_p);
1310
1311
dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, FOLL_WRITE);
1312
if (IS_ERR(dst_p)) {
1313
sev_unpin_memory(kvm, src_p, n);
1314
return PTR_ERR(dst_p);
1315
}
1316
1317
/*
1318
* Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
1319
* the pages; flush the destination too so that future accesses do not
1320
* see stale data.
1321
*/
1322
sev_clflush_pages(src_p, 1);
1323
sev_clflush_pages(dst_p, 1);
1324
1325
/*
1326
* Since user buffer may not be page aligned, calculate the
1327
* offset within the page.
1328
*/
1329
s_off = vaddr & ~PAGE_MASK;
1330
d_off = dst_vaddr & ~PAGE_MASK;
1331
len = min_t(size_t, (PAGE_SIZE - s_off), size);
1332
1333
if (dec)
1334
ret = __sev_dbg_decrypt_user(kvm,
1335
__sme_page_pa(src_p[0]) + s_off,
1336
(void __user *)dst_vaddr,
1337
__sme_page_pa(dst_p[0]) + d_off,
1338
len, &argp->error);
1339
else
1340
ret = __sev_dbg_encrypt_user(kvm,
1341
__sme_page_pa(src_p[0]) + s_off,
1342
(void __user *)vaddr,
1343
__sme_page_pa(dst_p[0]) + d_off,
1344
(void __user *)dst_vaddr,
1345
len, &argp->error);
1346
1347
sev_unpin_memory(kvm, src_p, n);
1348
sev_unpin_memory(kvm, dst_p, n);
1349
1350
if (ret)
1351
goto err;
1352
1353
next_vaddr = vaddr + len;
1354
dst_vaddr = dst_vaddr + len;
1355
size -= len;
1356
}
1357
err:
1358
return ret;
1359
}
1360
1361
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
1362
{
1363
struct sev_data_launch_secret data;
1364
struct kvm_sev_launch_secret params;
1365
struct page **pages;
1366
void *blob, *hdr;
1367
unsigned long n, i;
1368
int ret, offset;
1369
1370
if (!sev_guest(kvm))
1371
return -ENOTTY;
1372
1373
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
1374
return -EFAULT;
1375
1376
pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, FOLL_WRITE);
1377
if (IS_ERR(pages))
1378
return PTR_ERR(pages);
1379
1380
/*
1381
* Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1382
* place; the cache may contain the data that was written unencrypted.
1383
*/
1384
sev_clflush_pages(pages, n);
1385
1386
/*
1387
* The secret must be copied into a contiguous memory region, so let's verify
1388
* that userspace memory pages are contiguous before we issue command.
1389
*/
1390
if (get_num_contig_pages(0, pages, n) != n) {
1391
ret = -EINVAL;
1392
goto e_unpin_memory;
1393
}
1394
1395
memset(&data, 0, sizeof(data));
1396
1397
offset = params.guest_uaddr & (PAGE_SIZE - 1);
1398
data.guest_address = __sme_page_pa(pages[0]) + offset;
1399
data.guest_len = params.guest_len;
1400
1401
blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1402
if (IS_ERR(blob)) {
1403
ret = PTR_ERR(blob);
1404
goto e_unpin_memory;
1405
}
1406
1407
data.trans_address = __psp_pa(blob);
1408
data.trans_len = params.trans_len;
1409
1410
hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1411
if (IS_ERR(hdr)) {
1412
ret = PTR_ERR(hdr);
1413
goto e_free_blob;
1414
}
1415
data.hdr_address = __psp_pa(hdr);
1416
data.hdr_len = params.hdr_len;
1417
1418
data.handle = to_kvm_sev_info(kvm)->handle;
1419
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
1420
1421
kfree(hdr);
1422
1423
e_free_blob:
1424
kfree(blob);
1425
e_unpin_memory:
1426
/* content of memory is updated, mark pages dirty */
1427
for (i = 0; i < n; i++) {
1428
set_page_dirty_lock(pages[i]);
1429
mark_page_accessed(pages[i]);
1430
}
1431
sev_unpin_memory(kvm, pages, n);
1432
return ret;
1433
}
1434
1435
static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1436
{
1437
void __user *report = u64_to_user_ptr(argp->data);
1438
struct sev_data_attestation_report data;
1439
struct kvm_sev_attestation_report params;
1440
void __user *p;
1441
void *blob = NULL;
1442
int ret;
1443
1444
if (!sev_guest(kvm))
1445
return -ENOTTY;
1446
1447
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
1448
return -EFAULT;
1449
1450
memset(&data, 0, sizeof(data));
1451
1452
/* User wants to query the blob length */
1453
if (!params.len)
1454
goto cmd;
1455
1456
p = u64_to_user_ptr(params.uaddr);
1457
if (p) {
1458
if (params.len > SEV_FW_BLOB_MAX_SIZE)
1459
return -EINVAL;
1460
1461
blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
1462
if (!blob)
1463
return -ENOMEM;
1464
1465
data.address = __psp_pa(blob);
1466
data.len = params.len;
1467
memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
1468
}
1469
cmd:
1470
data.handle = to_kvm_sev_info(kvm)->handle;
1471
ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
1472
/*
1473
* If we only queried the length, the FW responded with the expected data.
1474
*/
1475
if (!params.len)
1476
goto done;
1477
1478
if (ret)
1479
goto e_free_blob;
1480
1481
if (blob) {
1482
if (copy_to_user(p, blob, params.len))
1483
ret = -EFAULT;
1484
}
1485
1486
done:
1487
params.len = data.len;
1488
if (copy_to_user(report, &params, sizeof(params)))
1489
ret = -EFAULT;
1490
e_free_blob:
1491
kfree(blob);
1492
return ret;
1493
}
1494
1495
/* Userspace wants to query session length. */
1496
static int
1497
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
1498
struct kvm_sev_send_start *params)
1499
{
1500
struct sev_data_send_start data;
1501
int ret;
1502
1503
memset(&data, 0, sizeof(data));
1504
data.handle = to_kvm_sev_info(kvm)->handle;
1505
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1506
1507
params->session_len = data.session_len;
1508
if (copy_to_user(u64_to_user_ptr(argp->data), params,
1509
sizeof(struct kvm_sev_send_start)))
1510
ret = -EFAULT;
1511
1512
return ret;
1513
}
1514
1515
static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1516
{
1517
struct sev_data_send_start data;
1518
struct kvm_sev_send_start params;
1519
void *amd_certs, *session_data;
1520
void *pdh_cert, *plat_certs;
1521
int ret;
1522
1523
if (!sev_guest(kvm))
1524
return -ENOTTY;
1525
1526
if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1527
sizeof(struct kvm_sev_send_start)))
1528
return -EFAULT;
1529
1530
/* if session_len is zero, userspace wants to query the session length */
1531
if (!params.session_len)
1532
return __sev_send_start_query_session_length(kvm, argp,
1533
&params);
1534
1535
/* some sanity checks */
1536
if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
1537
!params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
1538
return -EINVAL;
1539
1540
/* allocate the memory to hold the session data blob */
1541
session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
1542
if (!session_data)
1543
return -ENOMEM;
1544
1545
/* copy the certificate blobs from userspace */
1546
pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
1547
params.pdh_cert_len);
1548
if (IS_ERR(pdh_cert)) {
1549
ret = PTR_ERR(pdh_cert);
1550
goto e_free_session;
1551
}
1552
1553
plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
1554
params.plat_certs_len);
1555
if (IS_ERR(plat_certs)) {
1556
ret = PTR_ERR(plat_certs);
1557
goto e_free_pdh;
1558
}
1559
1560
amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
1561
params.amd_certs_len);
1562
if (IS_ERR(amd_certs)) {
1563
ret = PTR_ERR(amd_certs);
1564
goto e_free_plat_cert;
1565
}
1566
1567
/* populate the FW SEND_START field with system physical address */
1568
memset(&data, 0, sizeof(data));
1569
data.pdh_cert_address = __psp_pa(pdh_cert);
1570
data.pdh_cert_len = params.pdh_cert_len;
1571
data.plat_certs_address = __psp_pa(plat_certs);
1572
data.plat_certs_len = params.plat_certs_len;
1573
data.amd_certs_address = __psp_pa(amd_certs);
1574
data.amd_certs_len = params.amd_certs_len;
1575
data.session_address = __psp_pa(session_data);
1576
data.session_len = params.session_len;
1577
data.handle = to_kvm_sev_info(kvm)->handle;
1578
1579
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
1580
1581
if (!ret && copy_to_user(u64_to_user_ptr(params.session_uaddr),
1582
session_data, params.session_len)) {
1583
ret = -EFAULT;
1584
goto e_free_amd_cert;
1585
}
1586
1587
params.policy = data.policy;
1588
params.session_len = data.session_len;
1589
if (copy_to_user(u64_to_user_ptr(argp->data), &params,
1590
sizeof(struct kvm_sev_send_start)))
1591
ret = -EFAULT;
1592
1593
e_free_amd_cert:
1594
kfree(amd_certs);
1595
e_free_plat_cert:
1596
kfree(plat_certs);
1597
e_free_pdh:
1598
kfree(pdh_cert);
1599
e_free_session:
1600
kfree(session_data);
1601
return ret;
1602
}
1603
1604
/* Userspace wants to query either header or trans length. */
1605
static int
1606
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
1607
struct kvm_sev_send_update_data *params)
1608
{
1609
struct sev_data_send_update_data data;
1610
int ret;
1611
1612
memset(&data, 0, sizeof(data));
1613
data.handle = to_kvm_sev_info(kvm)->handle;
1614
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1615
1616
params->hdr_len = data.hdr_len;
1617
params->trans_len = data.trans_len;
1618
1619
if (copy_to_user(u64_to_user_ptr(argp->data), params,
1620
sizeof(struct kvm_sev_send_update_data)))
1621
ret = -EFAULT;
1622
1623
return ret;
1624
}
1625
1626
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1627
{
1628
struct sev_data_send_update_data data;
1629
struct kvm_sev_send_update_data params;
1630
void *hdr, *trans_data;
1631
struct page **guest_page;
1632
unsigned long n;
1633
int ret, offset;
1634
1635
if (!sev_guest(kvm))
1636
return -ENOTTY;
1637
1638
if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1639
sizeof(struct kvm_sev_send_update_data)))
1640
return -EFAULT;
1641
1642
/* userspace wants to query either header or trans length */
1643
if (!params.trans_len || !params.hdr_len)
1644
return __sev_send_update_data_query_lengths(kvm, argp, &params);
1645
1646
if (!params.trans_uaddr || !params.guest_uaddr ||
1647
!params.guest_len || !params.hdr_uaddr)
1648
return -EINVAL;
1649
1650
/* Check if we are crossing the page boundary */
1651
offset = params.guest_uaddr & (PAGE_SIZE - 1);
1652
if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
1653
return -EINVAL;
1654
1655
/* Pin guest memory */
1656
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1657
PAGE_SIZE, &n, 0);
1658
if (IS_ERR(guest_page))
1659
return PTR_ERR(guest_page);
1660
1661
/* allocate memory for header and transport buffer */
1662
ret = -ENOMEM;
1663
hdr = kzalloc(params.hdr_len, GFP_KERNEL);
1664
if (!hdr)
1665
goto e_unpin;
1666
1667
trans_data = kzalloc(params.trans_len, GFP_KERNEL);
1668
if (!trans_data)
1669
goto e_free_hdr;
1670
1671
memset(&data, 0, sizeof(data));
1672
data.hdr_address = __psp_pa(hdr);
1673
data.hdr_len = params.hdr_len;
1674
data.trans_address = __psp_pa(trans_data);
1675
data.trans_len = params.trans_len;
1676
1677
/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
1678
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1679
data.guest_address |= sev_me_mask;
1680
data.guest_len = params.guest_len;
1681
data.handle = to_kvm_sev_info(kvm)->handle;
1682
1683
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
1684
1685
if (ret)
1686
goto e_free_trans_data;
1687
1688
/* copy transport buffer to user space */
1689
if (copy_to_user(u64_to_user_ptr(params.trans_uaddr),
1690
trans_data, params.trans_len)) {
1691
ret = -EFAULT;
1692
goto e_free_trans_data;
1693
}
1694
1695
/* Copy packet header to userspace. */
1696
if (copy_to_user(u64_to_user_ptr(params.hdr_uaddr), hdr,
1697
params.hdr_len))
1698
ret = -EFAULT;
1699
1700
e_free_trans_data:
1701
kfree(trans_data);
1702
e_free_hdr:
1703
kfree(hdr);
1704
e_unpin:
1705
sev_unpin_memory(kvm, guest_page, n);
1706
1707
return ret;
1708
}
1709
1710
static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1711
{
1712
struct sev_data_send_finish data;
1713
1714
if (!sev_guest(kvm))
1715
return -ENOTTY;
1716
1717
data.handle = to_kvm_sev_info(kvm)->handle;
1718
return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
1719
}
1720
1721
static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
1722
{
1723
struct sev_data_send_cancel data;
1724
1725
if (!sev_guest(kvm))
1726
return -ENOTTY;
1727
1728
data.handle = to_kvm_sev_info(kvm)->handle;
1729
return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
1730
}
1731
1732
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
1733
{
1734
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
1735
struct sev_data_receive_start start;
1736
struct kvm_sev_receive_start params;
1737
int *error = &argp->error;
1738
void *session_data;
1739
void *pdh_data;
1740
int ret;
1741
1742
if (!sev_guest(kvm))
1743
return -ENOTTY;
1744
1745
/* Get parameter from the userspace */
1746
if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1747
sizeof(struct kvm_sev_receive_start)))
1748
return -EFAULT;
1749
1750
/* some sanity checks */
1751
if (!params.pdh_uaddr || !params.pdh_len ||
1752
!params.session_uaddr || !params.session_len)
1753
return -EINVAL;
1754
1755
pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
1756
if (IS_ERR(pdh_data))
1757
return PTR_ERR(pdh_data);
1758
1759
session_data = psp_copy_user_blob(params.session_uaddr,
1760
params.session_len);
1761
if (IS_ERR(session_data)) {
1762
ret = PTR_ERR(session_data);
1763
goto e_free_pdh;
1764
}
1765
1766
memset(&start, 0, sizeof(start));
1767
start.handle = params.handle;
1768
start.policy = params.policy;
1769
start.pdh_cert_address = __psp_pa(pdh_data);
1770
start.pdh_cert_len = params.pdh_len;
1771
start.session_address = __psp_pa(session_data);
1772
start.session_len = params.session_len;
1773
1774
/* create memory encryption context */
1775
ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
1776
error);
1777
if (ret)
1778
goto e_free_session;
1779
1780
/* Bind ASID to this guest */
1781
ret = sev_bind_asid(kvm, start.handle, error);
1782
if (ret) {
1783
sev_decommission(start.handle);
1784
goto e_free_session;
1785
}
1786
1787
params.handle = start.handle;
1788
if (copy_to_user(u64_to_user_ptr(argp->data),
1789
&params, sizeof(struct kvm_sev_receive_start))) {
1790
ret = -EFAULT;
1791
sev_unbind_asid(kvm, start.handle);
1792
goto e_free_session;
1793
}
1794
1795
sev->handle = start.handle;
1796
sev->fd = argp->sev_fd;
1797
1798
e_free_session:
1799
kfree(session_data);
1800
e_free_pdh:
1801
kfree(pdh_data);
1802
1803
return ret;
1804
}
1805
1806
static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
1807
{
1808
struct kvm_sev_receive_update_data params;
1809
struct sev_data_receive_update_data data;
1810
void *hdr = NULL, *trans = NULL;
1811
struct page **guest_page;
1812
unsigned long n;
1813
int ret, offset;
1814
1815
if (!sev_guest(kvm))
1816
return -EINVAL;
1817
1818
if (copy_from_user(&params, u64_to_user_ptr(argp->data),
1819
sizeof(struct kvm_sev_receive_update_data)))
1820
return -EFAULT;
1821
1822
if (!params.hdr_uaddr || !params.hdr_len ||
1823
!params.guest_uaddr || !params.guest_len ||
1824
!params.trans_uaddr || !params.trans_len)
1825
return -EINVAL;
1826
1827
/* Check if we are crossing the page boundary */
1828
offset = params.guest_uaddr & (PAGE_SIZE - 1);
1829
if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
1830
return -EINVAL;
1831
1832
hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1833
if (IS_ERR(hdr))
1834
return PTR_ERR(hdr);
1835
1836
trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1837
if (IS_ERR(trans)) {
1838
ret = PTR_ERR(trans);
1839
goto e_free_hdr;
1840
}
1841
1842
memset(&data, 0, sizeof(data));
1843
data.hdr_address = __psp_pa(hdr);
1844
data.hdr_len = params.hdr_len;
1845
data.trans_address = __psp_pa(trans);
1846
data.trans_len = params.trans_len;
1847
1848
/* Pin guest memory */
1849
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
1850
PAGE_SIZE, &n, FOLL_WRITE);
1851
if (IS_ERR(guest_page)) {
1852
ret = PTR_ERR(guest_page);
1853
goto e_free_trans;
1854
}
1855
1856
/*
1857
* Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
1858
* encrypts the written data with the guest's key, and the cache may
1859
* contain dirty, unencrypted data.
1860
*/
1861
sev_clflush_pages(guest_page, n);
1862
1863
/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
1864
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
1865
data.guest_address |= sev_me_mask;
1866
data.guest_len = params.guest_len;
1867
data.handle = to_kvm_sev_info(kvm)->handle;
1868
1869
ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
1870
&argp->error);
1871
1872
sev_unpin_memory(kvm, guest_page, n);
1873
1874
e_free_trans:
1875
kfree(trans);
1876
e_free_hdr:
1877
kfree(hdr);
1878
1879
return ret;
1880
}
1881
1882
static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
1883
{
1884
struct sev_data_receive_finish data;
1885
1886
if (!sev_guest(kvm))
1887
return -ENOTTY;
1888
1889
data.handle = to_kvm_sev_info(kvm)->handle;
1890
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
1891
}
1892
1893
static bool is_cmd_allowed_from_mirror(u32 cmd_id)
1894
{
1895
/*
1896
* Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA so that SEV-ES can be
* enabled on active mirror VMs. Also allow the debugging and status commands.
1898
*/
1899
if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
1900
cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
1901
cmd_id == KVM_SEV_DBG_ENCRYPT)
1902
return true;
1903
1904
return false;
1905
}
1906
1907
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1908
{
1909
struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
1910
struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
1911
int r = -EBUSY;
1912
1913
if (dst_kvm == src_kvm)
1914
return -EINVAL;
1915
1916
/*
1917
* Bail if these VMs are already involved in a migration to avoid
1918
* deadlock between two VMs trying to migrate to/from each other.
1919
*/
1920
if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
1921
return -EBUSY;
1922
1923
if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
1924
goto release_dst;
1925
1926
r = -EINTR;
1927
if (mutex_lock_killable(&dst_kvm->lock))
1928
goto release_src;
1929
if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
1930
goto unlock_dst;
1931
return 0;
1932
1933
unlock_dst:
1934
mutex_unlock(&dst_kvm->lock);
1935
release_src:
1936
atomic_set_release(&src_sev->migration_in_progress, 0);
1937
release_dst:
1938
atomic_set_release(&dst_sev->migration_in_progress, 0);
1939
return r;
1940
}
1941
1942
static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
1943
{
1944
struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
1945
struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
1946
1947
mutex_unlock(&dst_kvm->lock);
1948
mutex_unlock(&src_kvm->lock);
1949
atomic_set_release(&dst_sev->migration_in_progress, 0);
1950
atomic_set_release(&src_sev->migration_in_progress, 0);
1951
}
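/*
* Transfer all SEV state from the source VM to the destination: the ASID,
* firmware handle, pinned-region accounting, list of mirror VMs, and (for
* SEV-ES) each vCPU's VMSA/GHCB state. The source is left inactive.
*/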
1952
1953
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
1954
{
1955
struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
1956
struct kvm_sev_info *src = to_kvm_sev_info(src_kvm);
1957
struct kvm_vcpu *dst_vcpu, *src_vcpu;
1958
struct vcpu_svm *dst_svm, *src_svm;
1959
struct kvm_sev_info *mirror;
1960
unsigned long i;
1961
1962
dst->active = true;
1963
dst->asid = src->asid;
1964
dst->handle = src->handle;
1965
dst->pages_locked = src->pages_locked;
1966
dst->enc_context_owner = src->enc_context_owner;
1967
dst->es_active = src->es_active;
1968
dst->vmsa_features = src->vmsa_features;
1969
1970
src->asid = 0;
1971
src->active = false;
1972
src->handle = 0;
1973
src->pages_locked = 0;
1974
src->enc_context_owner = NULL;
1975
src->es_active = false;
1976
1977
list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
1978
1979
/*
1980
* If this VM has mirrors, "transfer" each mirror's refcount of the
1981
* source to the destination (this KVM). The caller holds a reference
1982
* to the source, so there's no danger of use-after-free.
1983
*/
1984
list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
1985
list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
1986
kvm_get_kvm(dst_kvm);
1987
kvm_put_kvm(src_kvm);
1988
mirror->enc_context_owner = dst_kvm;
1989
}
1990
1991
/*
1992
* If this VM is a mirror, remove the old mirror from the owners list
1993
* and add the new mirror to the list.
1994
*/
1995
if (is_mirroring_enc_context(dst_kvm)) {
1996
struct kvm_sev_info *owner_sev_info = to_kvm_sev_info(dst->enc_context_owner);
1997
1998
list_del(&src->mirror_entry);
1999
list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
2000
}
2001
2002
kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
2003
dst_svm = to_svm(dst_vcpu);
2004
2005
sev_init_vmcb(dst_svm, false);
2006
2007
if (!dst->es_active)
2008
continue;
2009
2010
/*
2011
* Note, the source is not required to have the same number of
2012
* vCPUs as the destination when migrating a vanilla SEV VM.
2013
*/
2014
src_vcpu = kvm_get_vcpu(src_kvm, i);
2015
src_svm = to_svm(src_vcpu);
2016
2017
/*
2018
* Transfer VMSA and GHCB state to the destination. Nullify and
2019
* clear source fields as appropriate, the state now belongs to
2020
* the destination.
2021
*/
2022
memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
2023
dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
2024
dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
2025
dst_vcpu->arch.guest_state_protected = true;
2026
2027
memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
2028
src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
2029
src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
2030
src_vcpu->arch.guest_state_protected = false;
2031
}
2032
}
2033
2034
static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
2035
{
2036
struct kvm_vcpu *src_vcpu;
2037
unsigned long i;
2038
2039
if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
2040
dst->created_vcpus != atomic_read(&dst->online_vcpus))
2041
return -EBUSY;
2042
2043
if (!sev_es_guest(src))
2044
return 0;
2045
2046
if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
2047
return -EINVAL;
2048
2049
kvm_for_each_vcpu(i, src_vcpu, src) {
2050
if (!src_vcpu->arch.guest_state_protected)
2051
return -EINVAL;
2052
}
2053
2054
return 0;
2055
}
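/*
* Intra-host migration: move the SEV encryption context from the VM
* referenced by @source_fd into @kvm. Both VMs and all of their vCPUs are
* locked for the duration, and the ASID charge is moved to the
* destination's misc cgroup when the two cgroups differ.
*/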
2056
2057
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
2058
{
2059
struct kvm_sev_info *dst_sev = to_kvm_sev_info(kvm);
2060
struct kvm_sev_info *src_sev, *cg_cleanup_sev;
2061
CLASS(fd, f)(source_fd);
2062
struct kvm *source_kvm;
2063
bool charged = false;
2064
int ret;
2065
2066
if (fd_empty(f))
2067
return -EBADF;
2068
2069
if (!file_is_kvm(fd_file(f)))
2070
return -EBADF;
2071
2072
source_kvm = fd_file(f)->private_data;
2073
ret = sev_lock_two_vms(kvm, source_kvm);
2074
if (ret)
2075
return ret;
2076
2077
if (kvm->arch.vm_type != source_kvm->arch.vm_type ||
2078
sev_guest(kvm) || !sev_guest(source_kvm)) {
2079
ret = -EINVAL;
2080
goto out_unlock;
2081
}
2082
2083
src_sev = to_kvm_sev_info(source_kvm);
2084
2085
dst_sev->misc_cg = get_current_misc_cg();
2086
cg_cleanup_sev = dst_sev;
2087
if (dst_sev->misc_cg != src_sev->misc_cg) {
2088
ret = sev_misc_cg_try_charge(dst_sev);
2089
if (ret)
2090
goto out_dst_cgroup;
2091
charged = true;
2092
}
2093
2094
ret = kvm_lock_all_vcpus(kvm);
2095
if (ret)
2096
goto out_dst_cgroup;
2097
ret = kvm_lock_all_vcpus(source_kvm);
2098
if (ret)
2099
goto out_dst_vcpu;
2100
2101
ret = sev_check_source_vcpus(kvm, source_kvm);
2102
if (ret)
2103
goto out_source_vcpu;
2104
2105
/*
2106
* Allocate a new have_run_cpus for the destination, i.e. don't copy
2107
* the set of CPUs from the source. If a CPU was used to run a vCPU in
2108
* the source VM but is never used for the destination VM, then the CPU
2109
* can only have cached memory that was accessible to the source VM.
2110
*/
2111
if (!zalloc_cpumask_var(&dst_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
2112
ret = -ENOMEM;
2113
goto out_source_vcpu;
2114
}
2115
2116
sev_migrate_from(kvm, source_kvm);
2117
kvm_vm_dead(source_kvm);
2118
cg_cleanup_sev = src_sev;
2119
ret = 0;
2120
2121
out_source_vcpu:
2122
kvm_unlock_all_vcpus(source_kvm);
2123
out_dst_vcpu:
2124
kvm_unlock_all_vcpus(kvm);
2125
out_dst_cgroup:
2126
/* Operates on the source on success, on the destination on failure. */
2127
if (charged)
2128
sev_misc_cg_uncharge(cg_cleanup_sev);
2129
put_misc_cg(cg_cleanup_sev->misc_cg);
2130
cg_cleanup_sev->misc_cg = NULL;
2131
out_unlock:
2132
sev_unlock_two_vms(kvm, source_kvm);
2133
return ret;
2134
}
2135
2136
int sev_dev_get_attr(u32 group, u64 attr, u64 *val)
2137
{
2138
if (group != KVM_X86_GRP_SEV)
2139
return -ENXIO;
2140
2141
switch (attr) {
2142
case KVM_X86_SEV_VMSA_FEATURES:
2143
*val = sev_supported_vmsa_features;
2144
return 0;
2145
2146
default:
2147
return -ENXIO;
2148
}
2149
}
2150
2151
/*
2152
* The guest context contains all the information, keys and metadata
2153
* associated with the guest that the firmware tracks to implement SEV
2154
* and SNP features. The firmware stores the guest context in a hypervisor-
2155
* provided page via the SNP_GCTX_CREATE command.
2156
*/
2157
static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
2158
{
2159
struct sev_data_snp_addr data = {};
2160
void *context;
2161
int rc;
2162
2163
/* Allocate memory for context page */
2164
context = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
2165
if (!context)
2166
return NULL;
2167
2168
data.address = __psp_pa(context);
2169
rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error);
2170
if (rc) {
2171
pr_warn("Failed to create SEV-SNP context, rc %d fw_error %d",
2172
rc, argp->error);
2173
snp_free_firmware_page(context);
2174
return NULL;
2175
}
2176
2177
return context;
2178
}
2179
2180
static int snp_bind_asid(struct kvm *kvm, int *error)
2181
{
2182
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2183
struct sev_data_snp_activate data = {0};
2184
2185
data.gctx_paddr = __psp_pa(sev->snp_context);
2186
data.asid = sev_get_asid(kvm);
2187
return sev_issue_cmd(kvm, SEV_CMD_SNP_ACTIVATE, &data, error);
2188
}
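/*
* SNP_LAUNCH_START: create the firmware guest context, issue the launch
* start command with the requested policy (and TSC frequency when Secure
* TSC is enabled), then bind an ASID to the context via SNP_ACTIVATE.
*/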
2189
2190
static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
2191
{
2192
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2193
struct sev_data_snp_launch_start start = {0};
2194
struct kvm_sev_snp_launch_start params;
2195
int rc;
2196
2197
if (!sev_snp_guest(kvm))
2198
return -ENOTTY;
2199
2200
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2201
return -EFAULT;
2202
2203
/* Don't allow userspace to allocate memory for more than 1 SNP context. */
2204
if (sev->snp_context)
2205
return -EINVAL;
2206
2207
if (params.flags)
2208
return -EINVAL;
2209
2210
if (params.policy & ~SNP_POLICY_MASK_VALID)
2211
return -EINVAL;
2212
2213
/* Check for policy bits that must be set */
2214
if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO))
2215
return -EINVAL;
2216
2217
if (snp_is_secure_tsc_enabled(kvm)) {
2218
if (WARN_ON_ONCE(!kvm->arch.default_tsc_khz))
2219
return -EINVAL;
2220
2221
start.desired_tsc_khz = kvm->arch.default_tsc_khz;
2222
}
2223
2224
sev->snp_context = snp_context_create(kvm, argp);
2225
if (!sev->snp_context)
2226
return -ENOTTY;
2227
2228
start.gctx_paddr = __psp_pa(sev->snp_context);
2229
start.policy = params.policy;
2230
2231
memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw));
2232
rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error);
2233
if (rc) {
2234
pr_debug("%s: SEV_CMD_SNP_LAUNCH_START firmware command failed, rc %d\n",
2235
__func__, rc);
2236
goto e_free_context;
2237
}
2238
2239
sev->policy = params.policy;
2240
sev->fd = argp->sev_fd;
2241
rc = snp_bind_asid(kvm, &argp->error);
2242
if (rc) {
2243
pr_debug("%s: Failed to bind ASID to SEV-SNP context, rc %d\n",
2244
__func__, rc);
2245
goto e_free_context;
2246
}
2247
2248
return 0;
2249
2250
e_free_context:
2251
snp_decommission_context(kvm);
2252
2253
return rc;
2254
}
2255
2256
struct sev_gmem_populate_args {
2257
__u8 type;
2258
int sev_fd;
2259
int fw_error;
2260
};
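/*
* Callback for kvm_gmem_populate(): for each 4K page, verify the RMP entry
* is still in the shared state, copy the initial image from userspace
* (unless the page type is ZERO), transition the page to guest-owned via
* rmp_make_private(), and encrypt/measure it with SNP_LAUNCH_UPDATE. On
* failure, any pages already made private are reverted to shared.
*/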
2261
2262
static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
2263
void __user *src, int order, void *opaque)
2264
{
2265
struct sev_gmem_populate_args *sev_populate_args = opaque;
2266
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2267
int n_private = 0, ret, i;
2268
int npages = (1 << order);
2269
gfn_t gfn;
2270
2271
if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
2272
return -EINVAL;
2273
2274
for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
2275
struct sev_data_snp_launch_update fw_args = {0};
2276
bool assigned = false;
2277
int level;
2278
2279
ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
2280
if (ret || assigned) {
2281
pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
2282
__func__, gfn, ret, assigned);
2283
ret = ret ? -EINVAL : -EEXIST;
2284
goto err;
2285
}
2286
2287
if (src) {
2288
void *vaddr = kmap_local_pfn(pfn + i);
2289
2290
if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
2291
ret = -EFAULT;
2292
goto err;
2293
}
2294
kunmap_local(vaddr);
2295
}
2296
2297
ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
2298
sev_get_asid(kvm), true);
2299
if (ret)
2300
goto err;
2301
2302
n_private++;
2303
2304
fw_args.gctx_paddr = __psp_pa(sev->snp_context);
2305
fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
2306
fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
2307
fw_args.page_type = sev_populate_args->type;
2308
2309
ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
2310
&fw_args, &sev_populate_args->fw_error);
2311
if (ret)
2312
goto fw_err;
2313
}
2314
2315
return 0;
2316
2317
fw_err:
2318
/*
2319
* If the firmware command failed, handle the reclaim and cleanup of that
2320
* PFN specially vs. prior pages which can be cleaned up below without
2321
* needing to reclaim in advance.
2322
*
2323
* Additionally, when invalid CPUID function entries are detected,
2324
* firmware writes the expected values into the page and leaves it
2325
* unencrypted so it can be used for debugging and error-reporting.
2326
*
2327
* Copy this page back into the source buffer so userspace can use this
2328
* information to determine which CPUID leaves/fields
2329
* failed CPUID validation.
2330
*/
2331
if (!snp_page_reclaim(kvm, pfn + i) &&
2332
sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
2333
sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
2334
void *vaddr = kmap_local_pfn(pfn + i);
2335
2336
if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
2337
pr_debug("Failed to write CPUID page back to userspace\n");
2338
2339
kunmap_local(vaddr);
2340
}
2341
2342
/* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
2343
n_private--;
2344
2345
err:
2346
pr_debug("%s: exiting with error ret %d (fw_error %d), restoring %d gmem PFNs to shared.\n",
2347
__func__, ret, sev_populate_args->fw_error, n_private);
2348
for (i = 0; i < n_private; i++)
2349
kvm_rmp_make_shared(kvm, pfn + i, PG_LEVEL_4K);
2350
2351
return ret;
2352
}
2353
2354
static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
2355
{
2356
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2357
struct sev_gmem_populate_args sev_populate_args = {0};
2358
struct kvm_sev_snp_launch_update params;
2359
struct kvm_memory_slot *memslot;
2360
long npages, count;
2361
void __user *src;
2362
int ret = 0;
2363
2364
if (!sev_snp_guest(kvm) || !sev->snp_context)
2365
return -EINVAL;
2366
2367
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2368
return -EFAULT;
2369
2370
pr_debug("%s: GFN start 0x%llx length 0x%llx type %d flags %d\n", __func__,
2371
params.gfn_start, params.len, params.type, params.flags);
2372
2373
if (!params.len || !PAGE_ALIGNED(params.len) || params.flags ||
2374
(params.type != KVM_SEV_SNP_PAGE_TYPE_NORMAL &&
2375
params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO &&
2376
params.type != KVM_SEV_SNP_PAGE_TYPE_UNMEASURED &&
2377
params.type != KVM_SEV_SNP_PAGE_TYPE_SECRETS &&
2378
params.type != KVM_SEV_SNP_PAGE_TYPE_CPUID))
2379
return -EINVAL;
2380
2381
npages = params.len / PAGE_SIZE;
2382
2383
/*
2384
* For each GFN that's being prepared as part of the initial guest
2385
* state, the following pre-conditions are verified:
2386
*
2387
* 1) The backing memslot is a valid private memslot.
2388
* 2) The GFN has been set to private via KVM_SET_MEMORY_ATTRIBUTES
2389
* beforehand.
2390
* 3) The PFN of the guest_memfd has not already been set to private
2391
* in the RMP table.
2392
*
2393
* The KVM MMU relies on kvm->mmu_invalidate_seq to retry nested page
2394
* faults if there's a race between a fault and an attribute update via
2395
* KVM_SET_MEMORY_ATTRIBUTES, and a similar approach could be utilized
2396
* here. However, kvm->slots_lock guards against both this as well as
2397
* concurrent memslot updates occurring while these checks are being
2398
* performed, so use that here to make it easier to reason about the
2399
* initial expected state and better guard against unexpected
2400
* situations.
2401
*/
2402
mutex_lock(&kvm->slots_lock);
2403
2404
memslot = gfn_to_memslot(kvm, params.gfn_start);
2405
if (!kvm_slot_has_gmem(memslot)) {
2406
ret = -EINVAL;
2407
goto out;
2408
}
2409
2410
sev_populate_args.sev_fd = argp->sev_fd;
2411
sev_populate_args.type = params.type;
2412
src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
2413
2414
count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
2415
sev_gmem_post_populate, &sev_populate_args);
2416
if (count < 0) {
2417
argp->error = sev_populate_args.fw_error;
2418
pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
2419
__func__, count, argp->error);
2420
ret = -EIO;
2421
} else {
2422
params.gfn_start += count;
2423
params.len -= count * PAGE_SIZE;
2424
if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
2425
params.uaddr += count * PAGE_SIZE;
2426
2427
ret = 0;
2428
if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
2429
ret = -EFAULT;
2430
}
2431
2432
out:
2433
mutex_unlock(&kvm->slots_lock);
2434
2435
return ret;
2436
}
2437
2438
static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
2439
{
2440
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2441
struct sev_data_snp_launch_update data = {};
2442
struct kvm_vcpu *vcpu;
2443
unsigned long i;
2444
int ret;
2445
2446
data.gctx_paddr = __psp_pa(sev->snp_context);
2447
data.page_type = SNP_PAGE_TYPE_VMSA;
2448
2449
kvm_for_each_vcpu(i, vcpu, kvm) {
2450
struct vcpu_svm *svm = to_svm(vcpu);
2451
u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
2452
2453
ret = sev_es_sync_vmsa(svm);
2454
if (ret)
2455
return ret;
2456
2457
/* Transition the VMSA page to a firmware state. */
2458
ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true);
2459
if (ret)
2460
return ret;
2461
2462
/* Issue the SNP command to encrypt the VMSA */
2463
data.address = __sme_pa(svm->sev_es.vmsa);
2464
ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
2465
&data, &argp->error);
2466
if (ret) {
2467
snp_page_reclaim(kvm, pfn);
2468
2469
return ret;
2470
}
2471
2472
svm->vcpu.arch.guest_state_protected = true;
2473
/*
2474
* SEV-ES (and thus SNP) guests mandate LBR Virtualization to
2475
* be _always_ ON. Enable it only after setting
2476
* guest_state_protected because KVM_SET_MSRS allows dynamic
2477
* toggling of LBRV (for performance reasons) on write access to
2478
* MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
2479
*/
2480
svm_enable_lbrv(vcpu);
2481
}
2482
2483
return 0;
2484
}
2485
2486
static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
2487
{
2488
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2489
struct kvm_sev_snp_launch_finish params;
2490
struct sev_data_snp_launch_finish *data;
2491
void *id_block = NULL, *id_auth = NULL;
2492
int ret;
2493
2494
if (!sev_snp_guest(kvm))
2495
return -ENOTTY;
2496
2497
if (!sev->snp_context)
2498
return -EINVAL;
2499
2500
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
2501
return -EFAULT;
2502
2503
if (params.flags)
2504
return -EINVAL;
2505
2506
/* Measure all vCPUs using LAUNCH_UPDATE before finalizing the launch flow. */
2507
ret = snp_launch_update_vmsa(kvm, argp);
2508
if (ret)
2509
return ret;
2510
2511
data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
2512
if (!data)
2513
return -ENOMEM;
2514
2515
if (params.id_block_en) {
2516
id_block = psp_copy_user_blob(params.id_block_uaddr, KVM_SEV_SNP_ID_BLOCK_SIZE);
2517
if (IS_ERR(id_block)) {
2518
ret = PTR_ERR(id_block);
2519
goto e_free;
2520
}
2521
2522
data->id_block_en = 1;
2523
data->id_block_paddr = __sme_pa(id_block);
2524
2525
id_auth = psp_copy_user_blob(params.id_auth_uaddr, KVM_SEV_SNP_ID_AUTH_SIZE);
2526
if (IS_ERR(id_auth)) {
2527
ret = PTR_ERR(id_auth);
2528
goto e_free_id_block;
2529
}
2530
2531
data->id_auth_paddr = __sme_pa(id_auth);
2532
2533
if (params.auth_key_en)
2534
data->auth_key_en = 1;
2535
}
2536
2537
data->vcek_disabled = params.vcek_disabled;
2538
2539
memcpy(data->host_data, params.host_data, KVM_SEV_SNP_FINISH_DATA_SIZE);
2540
data->gctx_paddr = __psp_pa(sev->snp_context);
2541
ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
2542
2543
/*
2544
* Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
2545
* can be given to the guest simply by marking the RMP entry as private.
2546
* This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
2547
*/
2548
if (!ret)
2549
kvm->arch.pre_fault_allowed = true;
2550
2551
kfree(id_auth);
2552
2553
e_free_id_block:
2554
kfree(id_block);
2555
2556
e_free:
2557
kfree(data);
2558
2559
return ret;
2560
}
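/*
* Top-level dispatcher for KVM_MEMORY_ENCRYPT_OP sub-commands, serialized
* by kvm->lock. Mirror VMs may only issue a small allowlist of commands,
* and SNP guests are restricted to SNP-specific commands.
*/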
2561
2562
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
2563
{
2564
struct kvm_sev_cmd sev_cmd;
2565
int r;
2566
2567
if (!sev_enabled)
2568
return -ENOTTY;
2569
2570
if (!argp)
2571
return 0;
2572
2573
if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
2574
return -EFAULT;
2575
2576
mutex_lock(&kvm->lock);
2577
2578
/* Only the enc_context_owner handles some memory enc operations. */
2579
if (is_mirroring_enc_context(kvm) &&
2580
!is_cmd_allowed_from_mirror(sev_cmd.id)) {
2581
r = -EINVAL;
2582
goto out;
2583
}
2584
2585
/*
2586
* Once KVM_SEV_INIT2 initializes a KVM instance as an SNP guest, only
2587
* allow the use of SNP-specific commands.
2588
*/
2589
if (sev_snp_guest(kvm) && sev_cmd.id < KVM_SEV_SNP_LAUNCH_START) {
2590
r = -EPERM;
2591
goto out;
2592
}
2593
2594
switch (sev_cmd.id) {
2595
case KVM_SEV_ES_INIT:
2596
if (!sev_es_enabled) {
2597
r = -ENOTTY;
2598
goto out;
2599
}
2600
fallthrough;
2601
case KVM_SEV_INIT:
2602
r = sev_guest_init(kvm, &sev_cmd);
2603
break;
2604
case KVM_SEV_INIT2:
2605
r = sev_guest_init2(kvm, &sev_cmd);
2606
break;
2607
case KVM_SEV_LAUNCH_START:
2608
r = sev_launch_start(kvm, &sev_cmd);
2609
break;
2610
case KVM_SEV_LAUNCH_UPDATE_DATA:
2611
r = sev_launch_update_data(kvm, &sev_cmd);
2612
break;
2613
case KVM_SEV_LAUNCH_UPDATE_VMSA:
2614
r = sev_launch_update_vmsa(kvm, &sev_cmd);
2615
break;
2616
case KVM_SEV_LAUNCH_MEASURE:
2617
r = sev_launch_measure(kvm, &sev_cmd);
2618
break;
2619
case KVM_SEV_LAUNCH_FINISH:
2620
r = sev_launch_finish(kvm, &sev_cmd);
2621
break;
2622
case KVM_SEV_GUEST_STATUS:
2623
r = sev_guest_status(kvm, &sev_cmd);
2624
break;
2625
case KVM_SEV_DBG_DECRYPT:
2626
r = sev_dbg_crypt(kvm, &sev_cmd, true);
2627
break;
2628
case KVM_SEV_DBG_ENCRYPT:
2629
r = sev_dbg_crypt(kvm, &sev_cmd, false);
2630
break;
2631
case KVM_SEV_LAUNCH_SECRET:
2632
r = sev_launch_secret(kvm, &sev_cmd);
2633
break;
2634
case KVM_SEV_GET_ATTESTATION_REPORT:
2635
r = sev_get_attestation_report(kvm, &sev_cmd);
2636
break;
2637
case KVM_SEV_SEND_START:
2638
r = sev_send_start(kvm, &sev_cmd);
2639
break;
2640
case KVM_SEV_SEND_UPDATE_DATA:
2641
r = sev_send_update_data(kvm, &sev_cmd);
2642
break;
2643
case KVM_SEV_SEND_FINISH:
2644
r = sev_send_finish(kvm, &sev_cmd);
2645
break;
2646
case KVM_SEV_SEND_CANCEL:
2647
r = sev_send_cancel(kvm, &sev_cmd);
2648
break;
2649
case KVM_SEV_RECEIVE_START:
2650
r = sev_receive_start(kvm, &sev_cmd);
2651
break;
2652
case KVM_SEV_RECEIVE_UPDATE_DATA:
2653
r = sev_receive_update_data(kvm, &sev_cmd);
2654
break;
2655
case KVM_SEV_RECEIVE_FINISH:
2656
r = sev_receive_finish(kvm, &sev_cmd);
2657
break;
2658
case KVM_SEV_SNP_LAUNCH_START:
2659
r = snp_launch_start(kvm, &sev_cmd);
2660
break;
2661
case KVM_SEV_SNP_LAUNCH_UPDATE:
2662
r = snp_launch_update(kvm, &sev_cmd);
2663
break;
2664
case KVM_SEV_SNP_LAUNCH_FINISH:
2665
r = snp_launch_finish(kvm, &sev_cmd);
2666
break;
2667
default:
2668
r = -EINVAL;
2669
goto out;
2670
}
2671
2672
if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
2673
r = -EFAULT;
2674
2675
out:
2676
mutex_unlock(&kvm->lock);
2677
return r;
2678
}
2679
2680
int sev_mem_enc_register_region(struct kvm *kvm,
2681
struct kvm_enc_region *range)
2682
{
2683
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2684
struct enc_region *region;
2685
int ret = 0;
2686
2687
if (!sev_guest(kvm))
2688
return -ENOTTY;
2689
2690
/* If kvm is mirroring an encryption context, it isn't responsible for it */
2691
if (is_mirroring_enc_context(kvm))
2692
return -EINVAL;
2693
2694
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
2695
return -EINVAL;
2696
2697
region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
2698
if (!region)
2699
return -ENOMEM;
2700
2701
mutex_lock(&kvm->lock);
2702
region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
2703
FOLL_WRITE | FOLL_LONGTERM);
2704
if (IS_ERR(region->pages)) {
2705
ret = PTR_ERR(region->pages);
2706
mutex_unlock(&kvm->lock);
2707
goto e_free;
2708
}
2709
2710
/*
2711
* The guest may change the memory encryption attribute from C=0 -> C=1
2712
* or vice versa for this memory range. Let's make sure caches are
2713
* flushed to ensure that guest data gets written into memory with
2714
* the correct C-bit. Note, this must be done before dropping kvm->lock,
2715
* as region and its array of pages can be freed by a different task
2716
* once kvm->lock is released.
2717
*/
2718
sev_clflush_pages(region->pages, region->npages);
2719
2720
region->uaddr = range->addr;
2721
region->size = range->size;
2722
2723
list_add_tail(&region->list, &sev->regions_list);
2724
mutex_unlock(&kvm->lock);
2725
2726
return ret;
2727
2728
e_free:
2729
kfree(region);
2730
return ret;
2731
}
2732
2733
static struct enc_region *
2734
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
2735
{
2736
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2737
struct list_head *head = &sev->regions_list;
2738
struct enc_region *i;
2739
2740
list_for_each_entry(i, head, list) {
2741
if (i->uaddr == range->addr &&
2742
i->size == range->size)
2743
return i;
2744
}
2745
2746
return NULL;
2747
}
2748
2749
static void __unregister_enc_region_locked(struct kvm *kvm,
2750
struct enc_region *region)
2751
{
2752
sev_unpin_memory(kvm, region->pages, region->npages);
2753
list_del(&region->list);
2754
kfree(region);
2755
}
2756
2757
int sev_mem_enc_unregister_region(struct kvm *kvm,
2758
struct kvm_enc_region *range)
2759
{
2760
struct enc_region *region;
2761
int ret;
2762
2763
/* If kvm is mirroring an encryption context, it isn't responsible for it */
2764
if (is_mirroring_enc_context(kvm))
2765
return -EINVAL;
2766
2767
mutex_lock(&kvm->lock);
2768
2769
if (!sev_guest(kvm)) {
2770
ret = -ENOTTY;
2771
goto failed;
2772
}
2773
2774
region = find_enc_region(kvm, range);
2775
if (!region) {
2776
ret = -EINVAL;
2777
goto failed;
2778
}
2779
2780
sev_writeback_caches(kvm);
2781
2782
__unregister_enc_region_locked(kvm, region);
2783
2784
mutex_unlock(&kvm->lock);
2785
return 0;
2786
2787
failed:
2788
mutex_unlock(&kvm->lock);
2789
return ret;
2790
}
2791
2792
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
2793
{
2794
CLASS(fd, f)(source_fd);
2795
struct kvm *source_kvm;
2796
struct kvm_sev_info *source_sev, *mirror_sev;
2797
int ret;
2798
2799
if (fd_empty(f))
2800
return -EBADF;
2801
2802
if (!file_is_kvm(fd_file(f)))
2803
return -EBADF;
2804
2805
source_kvm = fd_file(f)->private_data;
2806
ret = sev_lock_two_vms(kvm, source_kvm);
2807
if (ret)
2808
return ret;
2809
2810
/*
2811
* Mirrors of mirrors should work, but let's not get silly. Also
2812
* disallow out-of-band SEV/SEV-ES init if the target is already an
2813
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
2814
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
2815
*/
2816
if (sev_guest(kvm) || !sev_guest(source_kvm) ||
2817
is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
2818
ret = -EINVAL;
2819
goto e_unlock;
2820
}
2821
2822
mirror_sev = to_kvm_sev_info(kvm);
2823
if (!zalloc_cpumask_var(&mirror_sev->have_run_cpus, GFP_KERNEL_ACCOUNT)) {
2824
ret = -ENOMEM;
2825
goto e_unlock;
2826
}
2827
2828
/*
2829
* The mirror kvm holds an enc_context_owner ref so its asid can't
2830
* disappear until we're done with it.
2831
*/
2832
source_sev = to_kvm_sev_info(source_kvm);
2833
kvm_get_kvm(source_kvm);
2834
list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
2835
2836
/* Set enc_context_owner and copy its encryption context over */
2837
mirror_sev->enc_context_owner = source_kvm;
2838
mirror_sev->active = true;
2839
mirror_sev->asid = source_sev->asid;
2840
mirror_sev->fd = source_sev->fd;
2841
mirror_sev->es_active = source_sev->es_active;
2842
mirror_sev->need_init = false;
2843
mirror_sev->handle = source_sev->handle;
2844
INIT_LIST_HEAD(&mirror_sev->regions_list);
2845
INIT_LIST_HEAD(&mirror_sev->mirror_vms);
2846
ret = 0;
2847
2848
/*
2849
* Do not copy ap_jump_table, since the mirror does not share the same
2850
* KVM contexts as the original and may have different
2851
* memory views.
2852
*/
2853
2854
e_unlock:
2855
sev_unlock_two_vms(kvm, source_kvm);
2856
return ret;
2857
}
2858
2859
static int snp_decommission_context(struct kvm *kvm)
2860
{
2861
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2862
struct sev_data_snp_addr data = {};
2863
int ret;
2864
2865
/* If context is not created then do nothing */
2866
if (!sev->snp_context)
2867
return 0;
2868
2869
/* Do the decommission, which will unbind the ASID from the SNP context */
2870
data.address = __sme_pa(sev->snp_context);
2871
down_write(&sev_deactivate_lock);
2872
ret = sev_do_cmd(SEV_CMD_SNP_DECOMMISSION, &data, NULL);
2873
up_write(&sev_deactivate_lock);
2874
2875
if (WARN_ONCE(ret, "Failed to release guest context, ret %d", ret))
2876
return ret;
2877
2878
snp_free_firmware_page(sev->snp_context);
2879
sev->snp_context = NULL;
2880
2881
return 0;
2882
}
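/*
* Tear down per-VM SEV state: mirror VMs simply detach from their owner,
* while owners unpin any registered regions, decommission the SNP context
* or unbind the SEV/SEV-ES ASID, and finally free the ASID.
*/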
2883
2884
void sev_vm_destroy(struct kvm *kvm)
2885
{
2886
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
2887
struct list_head *head = &sev->regions_list;
2888
struct list_head *pos, *q;
2889
2890
if (!sev_guest(kvm))
2891
return;
2892
2893
WARN_ON(!list_empty(&sev->mirror_vms));
2894
2895
free_cpumask_var(sev->have_run_cpus);
2896
2897
/*
2898
* If this is a mirror VM, remove it from the owner's list of mirrors
2899
* and skip ASID cleanup (the ASID is tied to the lifetime of the owner).
2900
* Note, mirror VMs don't support registering encrypted regions.
2901
*/
2902
if (is_mirroring_enc_context(kvm)) {
2903
struct kvm *owner_kvm = sev->enc_context_owner;
2904
2905
mutex_lock(&owner_kvm->lock);
2906
list_del(&sev->mirror_entry);
2907
mutex_unlock(&owner_kvm->lock);
2908
kvm_put_kvm(owner_kvm);
2909
return;
2910
}
2911
2912
2913
/*
2914
* If userspace was terminated before unregistering the memory regions,
2915
* then unpin all the registered memory.
2916
*/
2917
if (!list_empty(head)) {
2918
list_for_each_safe(pos, q, head) {
2919
__unregister_enc_region_locked(kvm,
2920
list_entry(pos, struct enc_region, list));
2921
cond_resched();
2922
}
2923
}
2924
2925
if (sev_snp_guest(kvm)) {
2926
snp_guest_req_cleanup(kvm);
2927
2928
/*
2929
* Decommission handles unbinding of the ASID. If it fails for
2930
* some unexpected reason, just leak the ASID.
2931
*/
2932
if (snp_decommission_context(kvm))
2933
return;
2934
} else {
2935
sev_unbind_asid(kvm, sev->handle);
2936
}
2937
2938
sev_asid_free(sev);
2939
}
2940
2941
void __init sev_set_cpu_caps(void)
2942
{
2943
if (sev_enabled) {
2944
kvm_cpu_cap_set(X86_FEATURE_SEV);
2945
kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_VM);
2946
}
2947
if (sev_es_enabled) {
2948
kvm_cpu_cap_set(X86_FEATURE_SEV_ES);
2949
kvm_caps.supported_vm_types |= BIT(KVM_X86_SEV_ES_VM);
2950
}
2951
if (sev_snp_enabled) {
2952
kvm_cpu_cap_set(X86_FEATURE_SEV_SNP);
2953
kvm_caps.supported_vm_types |= BIT(KVM_X86_SNP_VM);
2954
}
2955
}
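/*
* Query SNP_PLATFORM_STATUS to confirm that firmware actually reached the
* SNP initialized state, rather than relying solely on CPU/platform
* capability checks.
*/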
2956
2957
static bool is_sev_snp_initialized(void)
2958
{
2959
struct sev_user_data_snp_status *status;
2960
struct sev_data_snp_addr buf;
2961
bool initialized = false;
2962
int ret, error = 0;
2963
2964
status = snp_alloc_firmware_page(GFP_KERNEL | __GFP_ZERO);
2965
if (!status)
2966
return false;
2967
2968
buf.address = __psp_pa(status);
2969
ret = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &error);
2970
if (ret) {
2971
pr_err("SEV: SNP_PLATFORM_STATUS failed ret=%d, fw_error=%d (%#x)\n",
2972
ret, error, error);
2973
goto out;
2974
}
2975
2976
initialized = !!status->state;
2977
2978
out:
2979
snp_free_firmware_page(status);
2980
2981
return initialized;
2982
}
2983
2984
void __init sev_hardware_setup(void)
2985
{
2986
unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
2987
struct sev_platform_init_args init_args = {0};
2988
bool sev_snp_supported = false;
2989
bool sev_es_supported = false;
2990
bool sev_supported = false;
2991
2992
if (!sev_enabled || !npt_enabled || !nrips)
2993
goto out;
2994
2995
/*
2996
* SEV must obviously be supported in hardware. Sanity check that the
2997
* CPU supports decode assists, which is mandatory for SEV guests to
2998
* support instruction emulation. Ditto for flushing by ASID, as SEV
2999
* guests are bound to a single ASID, i.e. KVM can't rotate to a new
3000
* ASID to effect a TLB flush.
3001
*/
3002
if (!boot_cpu_has(X86_FEATURE_SEV) ||
3003
WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
3004
WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))
3005
goto out;
3006
3007
/*
3008
* The kernel's initcall infrastructure lacks the ability to express
3009
* dependencies between initcalls, whereas the modules infrastructure
3010
* automatically handles dependencies via symbol loading. Ensure the
3011
* PSP SEV driver is initialized before proceeding if KVM is built-in,
3012
* as the dependency isn't handled by the initcall infrastructure.
3013
*/
3014
if (IS_BUILTIN(CONFIG_KVM_AMD) && sev_module_init())
3015
goto out;
3016
3017
/* Retrieve SEV CPUID information */
3018
cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
3019
3020
/* Set encryption bit location for SEV-ES guests */
3021
sev_enc_bit = ebx & 0x3f;
3022
3023
/* Maximum number of encrypted guests supported simultaneously */
3024
max_sev_asid = ecx;
3025
if (!max_sev_asid)
3026
goto out;
3027
3028
/* Minimum ASID value that should be used for SEV guest */
3029
min_sev_asid = edx;
3030
sev_me_mask = 1UL << (ebx & 0x3f);
3031
3032
/*
3033
* Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
3034
* even though it's never used, so that the bitmap is indexed by the
3035
* actual ASID.
3036
*/
3037
nr_asids = max_sev_asid + 1;
3038
sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
3039
if (!sev_asid_bitmap)
3040
goto out;
3041
3042
sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
3043
if (!sev_reclaim_asid_bitmap) {
3044
bitmap_free(sev_asid_bitmap);
3045
sev_asid_bitmap = NULL;
3046
goto out;
3047
}
3048
3049
if (min_sev_asid <= max_sev_asid) {
3050
sev_asid_count = max_sev_asid - min_sev_asid + 1;
3051
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
3052
}
3053
sev_supported = true;
3054
3055
/* SEV-ES support requested? */
3056
if (!sev_es_enabled)
3057
goto out;
3058
3059
/*
3060
* SEV-ES requires MMIO caching as KVM doesn't have access to the guest
3061
* instruction stream, i.e. can't emulate in response to a #NPF and
3062
* instead relies on #NPF(RSVD) being reflected into the guest as #VC
3063
* (the guest can then do a #VMGEXIT to request MMIO emulation).
3064
*/
3065
if (!enable_mmio_caching)
3066
goto out;
3067
3068
/* Does the CPU support SEV-ES? */
3069
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
3070
goto out;
3071
3072
if (!lbrv) {
3073
WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
3074
"LBRV must be present for SEV-ES support");
3075
goto out;
3076
}
3077
3078
/* Has the system been allocated ASIDs for SEV-ES? */
3079
if (min_sev_asid == 1)
3080
goto out;
3081
3082
min_sev_es_asid = min_snp_asid = 1;
3083
max_sev_es_asid = max_snp_asid = min_sev_asid - 1;
3084
3085
sev_es_asid_count = min_sev_asid - 1;
3086
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
3087
sev_es_supported = true;
3088
sev_snp_supported = sev_snp_enabled && cc_platform_has(CC_ATTR_HOST_SEV_SNP);
3089
3090
out:
3091
if (sev_enabled) {
3092
init_args.probe = true;
3093
3094
if (sev_is_snp_ciphertext_hiding_supported())
3095
init_args.max_snp_asid = min(nr_ciphertext_hiding_asids,
3096
min_sev_asid - 1);
3097
3098
if (sev_platform_init(&init_args))
3099
sev_supported = sev_es_supported = sev_snp_supported = false;
3100
else if (sev_snp_supported)
3101
sev_snp_supported = is_sev_snp_initialized();
3102
3103
if (sev_snp_supported)
3104
nr_ciphertext_hiding_asids = init_args.max_snp_asid;
3105
3106
/*
3107
* If ciphertext hiding is enabled, the joint SEV-ES/SEV-SNP
3108
* ASID range is partitioned into separate SEV-ES and SEV-SNP
3109
* ASID ranges, with the SEV-SNP range being [1..max_snp_asid]
3110
* and the SEV-ES range being (max_snp_asid..max_sev_es_asid].
3111
* Note, SEV-ES may effectively be disabled if all ASIDs from
3112
* the joint range are assigned to SEV-SNP.
3113
*/
3114
if (nr_ciphertext_hiding_asids) {
3115
max_snp_asid = nr_ciphertext_hiding_asids;
3116
min_sev_es_asid = max_snp_asid + 1;
3117
pr_info("SEV-SNP ciphertext hiding enabled\n");
3118
}
3119
}
3120
3121
if (boot_cpu_has(X86_FEATURE_SEV))
3122
pr_info("SEV %s (ASIDs %u - %u)\n",
3123
sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
3124
"unusable" :
3125
"disabled",
3126
min_sev_asid, max_sev_asid);
3127
if (boot_cpu_has(X86_FEATURE_SEV_ES))
3128
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
3129
sev_es_supported ? min_sev_es_asid <= max_sev_es_asid ? "enabled" :
3130
"unusable" :
3131
"disabled",
3132
min_sev_es_asid, max_sev_es_asid);
3133
if (boot_cpu_has(X86_FEATURE_SEV_SNP))
3134
pr_info("SEV-SNP %s (ASIDs %u - %u)\n",
3135
str_enabled_disabled(sev_snp_supported),
3136
min_snp_asid, max_snp_asid);
3137
3138
sev_enabled = sev_supported;
3139
sev_es_enabled = sev_es_supported;
3140
sev_snp_enabled = sev_snp_supported;
3141
3142
if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
3143
!cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
3144
sev_es_debug_swap_enabled = false;
3145
3146
sev_supported_vmsa_features = 0;
3147
if (sev_es_debug_swap_enabled)
3148
sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
3149
3150
if (sev_snp_enabled && tsc_khz && cpu_feature_enabled(X86_FEATURE_SNP_SECURE_TSC))
3151
sev_supported_vmsa_features |= SVM_SEV_FEAT_SECURE_TSC;
3152
}
3153
3154
void sev_hardware_unsetup(void)
3155
{
3156
if (!sev_enabled)
3157
return;
3158
3159
/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
3160
sev_flush_asids(1, max_sev_asid);
3161
3162
bitmap_free(sev_asid_bitmap);
3163
bitmap_free(sev_reclaim_asid_bitmap);
3164
3165
misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
3166
misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
3167
3168
sev_platform_shutdown();
3169
}
3170
3171
int sev_cpu_init(struct svm_cpu_data *sd)
3172
{
3173
if (!sev_enabled)
3174
return 0;
3175
3176
sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
3177
if (!sd->sev_vmcbs)
3178
return -ENOMEM;
3179
3180
return 0;
3181
}
3182
3183
/*
3184
* Pages used by hardware to hold guest encrypted state must be flushed before
3185
* returning them to the system.
3186
*/
3187
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
3188
{
3189
unsigned int asid = sev_get_asid(vcpu->kvm);
3190
3191
/*
3192
* Note! The address must be a kernel address, as regular page walk
3193
* checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
3194
* address is non-deterministic and unsafe. This function deliberately
3195
* takes a pointer to deter passing in a user address.
3196
*/
3197
unsigned long addr = (unsigned long)va;
3198
3199
/*
3200
* If CPU-enforced cache coherency for encrypted mappings of the
3201
* same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
3202
* flush is still needed in order to work properly with DMA devices.
3203
*/
3204
if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
3205
clflush_cache_range(va, PAGE_SIZE);
3206
return;
3207
}
3208
3209
/*
3210
* VM Page Flush takes a host virtual address and a guest ASID. Fall
3211
* back to full writeback of caches if this faults so as not to make
3212
* any problems worse by leaving stale encrypted data in the cache.
3213
*/
3214
if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
3215
goto do_sev_writeback_caches;
3216
3217
return;
3218
3219
do_sev_writeback_caches:
3220
sev_writeback_caches(vcpu->kvm);
3221
}
3222
3223
void sev_guest_memory_reclaimed(struct kvm *kvm)
3224
{
3225
/*
3226
* With SNP+gmem, private/encrypted memory is unreachable via the
3227
* hva-based mmu notifiers, i.e. these events are explicitly scoped to
3228
* shared pages, where there's no need to flush caches.
3229
*/
3230
if (!sev_guest(kvm) || sev_snp_guest(kvm))
3231
return;
3232
3233
sev_writeback_caches(kvm);
3234
}
3235
3236
void sev_free_vcpu(struct kvm_vcpu *vcpu)
3237
{
3238
struct vcpu_svm *svm;
3239
3240
if (!sev_es_guest(vcpu->kvm))
3241
return;
3242
3243
svm = to_svm(vcpu);
3244
3245
/*
3246
* If it's an SNP guest, then the VMSA was marked in the RMP table as
3247
* a guest-owned page. Transition the page to hypervisor state before
3248
* releasing it back to the system.
3249
*/
3250
if (sev_snp_guest(vcpu->kvm)) {
3251
u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
3252
3253
if (kvm_rmp_make_shared(vcpu->kvm, pfn, PG_LEVEL_4K))
3254
goto skip_vmsa_free;
3255
}
3256
3257
if (vcpu->arch.guest_state_protected)
3258
sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
3259
3260
__free_page(virt_to_page(svm->sev_es.vmsa));
3261
3262
skip_vmsa_free:
3263
if (svm->sev_es.ghcb_sa_free)
3264
kvfree(svm->sev_es.ghcb_sa);
3265
}
3266
3267
static u64 kvm_get_cached_sw_exit_code(struct vmcb_control_area *control)
3268
{
3269
return (((u64)control->exit_code_hi) << 32) | control->exit_code;
3270
}
3271
3272
static void dump_ghcb(struct vcpu_svm *svm)
3273
{
3274
struct vmcb_control_area *control = &svm->vmcb->control;
3275
unsigned int nbits;
3276
3277
/* Re-use the dump_invalid_vmcb module parameter */
3278
if (!dump_invalid_vmcb) {
3279
pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3280
return;
3281
}
3282
3283
nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
3284
3285
/*
3286
* Print KVM's snapshot of the GHCB values that were (unsuccessfully)
3287
* used to handle the exit. If the guest has since modified the GHCB
3288
* itself, dumping the raw GHCB won't help debug why KVM was unable to
3289
* handle the VMGEXIT that KVM observed.
3290
*/
3291
pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
3292
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
3293
kvm_get_cached_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
3294
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
3295
control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
3296
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
3297
control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
3298
pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
3299
svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
3300
pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
3301
}
3302
3303
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
3304
{
3305
struct kvm_vcpu *vcpu = &svm->vcpu;
3306
struct ghcb *ghcb = svm->sev_es.ghcb;
3307
3308
/*
3309
* The GHCB protocol so far allows for the following data
3310
* to be returned:
3311
* GPRs RAX, RBX, RCX, RDX
3312
*
3313
* Copy their values, even if they may not have been written during the
3314
* VM-Exit. It's the guest's responsibility to not consume random data.
3315
*/
3316
ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
3317
ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
3318
ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
3319
ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
3320
}
3321
3322
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
3323
{
3324
struct vmcb_control_area *control = &svm->vmcb->control;
3325
struct kvm_vcpu *vcpu = &svm->vcpu;
3326
struct ghcb *ghcb = svm->sev_es.ghcb;
3327
u64 exit_code;
3328
3329
/*
3330
* The GHCB protocol so far allows for the following data
3331
* to be supplied:
3332
* GPRs RAX, RBX, RCX, RDX
3333
* XCR0
3334
* CPL
3335
*
3336
* VMMCALL allows the guest to provide extra registers. KVM also
3337
* expects RSI for hypercalls, so include that, too.
3338
*
3339
* Copy their values to the appropriate location if supplied.
3340
*/
3341
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
3342
3343
BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
3344
memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
3345
3346
vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm);
3347
vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm);
3348
vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm);
3349
vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm);
3350
vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm);
3351
3352
svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm);
3353
3354
if (kvm_ghcb_xcr0_is_valid(svm))
3355
__kvm_set_xcr(vcpu, 0, kvm_ghcb_get_xcr0(svm));
3356
3357
if (kvm_ghcb_xss_is_valid(svm))
3358
__kvm_emulate_msr_write(vcpu, MSR_IA32_XSS, kvm_ghcb_get_xss(svm));
3359
3360
/* Copy the GHCB exit information into the VMCB fields */
3361
exit_code = kvm_ghcb_get_sw_exit_code(svm);
3362
control->exit_code = lower_32_bits(exit_code);
3363
control->exit_code_hi = upper_32_bits(exit_code);
3364
control->exit_info_1 = kvm_ghcb_get_sw_exit_info_1(svm);
3365
control->exit_info_2 = kvm_ghcb_get_sw_exit_info_2(svm);
3366
svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm);
3367
3368
/* Clear the valid entries fields */
3369
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
3370
}
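/*
* Verify that the guest marked as valid every GHCB field required by the
* requested exit code. On failure, the error is reported back to the guest
* via the GHCB and the vCPU is resumed.
*/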
3371
3372
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
3373
{
3374
struct vmcb_control_area *control = &svm->vmcb->control;
3375
struct kvm_vcpu *vcpu = &svm->vcpu;
3376
u64 exit_code;
3377
u64 reason;
3378
3379
/*
3380
* Retrieve the exit code now even though it may not be marked valid
3381
* as it could help with debugging.
3382
*/
3383
exit_code = kvm_get_cached_sw_exit_code(control);
3384
3385
/* Only GHCB Usage code 0 is supported */
3386
if (svm->sev_es.ghcb->ghcb_usage) {
3387
reason = GHCB_ERR_INVALID_USAGE;
3388
goto vmgexit_err;
3389
}
3390
3391
reason = GHCB_ERR_MISSING_INPUT;
3392
3393
if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
3394
!kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
3395
!kvm_ghcb_sw_exit_info_2_is_valid(svm))
3396
goto vmgexit_err;
3397
3398
switch (exit_code) {
3399
case SVM_EXIT_READ_DR7:
3400
break;
3401
case SVM_EXIT_WRITE_DR7:
3402
if (!kvm_ghcb_rax_is_valid(svm))
3403
goto vmgexit_err;
3404
break;
3405
case SVM_EXIT_RDTSC:
3406
break;
3407
case SVM_EXIT_RDPMC:
3408
if (!kvm_ghcb_rcx_is_valid(svm))
3409
goto vmgexit_err;
3410
break;
3411
case SVM_EXIT_CPUID:
3412
if (!kvm_ghcb_rax_is_valid(svm) ||
3413
!kvm_ghcb_rcx_is_valid(svm))
3414
goto vmgexit_err;
3415
if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
3416
if (!kvm_ghcb_xcr0_is_valid(svm))
3417
goto vmgexit_err;
3418
break;
3419
case SVM_EXIT_INVD:
3420
break;
3421
case SVM_EXIT_IOIO:
3422
if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
3423
if (!kvm_ghcb_sw_scratch_is_valid(svm))
3424
goto vmgexit_err;
3425
} else {
3426
if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
3427
if (!kvm_ghcb_rax_is_valid(svm))
3428
goto vmgexit_err;
3429
}
3430
break;
3431
case SVM_EXIT_MSR:
3432
if (!kvm_ghcb_rcx_is_valid(svm))
3433
goto vmgexit_err;
3434
if (control->exit_info_1) {
3435
if (!kvm_ghcb_rax_is_valid(svm) ||
3436
!kvm_ghcb_rdx_is_valid(svm))
3437
goto vmgexit_err;
3438
}
3439
break;
3440
case SVM_EXIT_VMMCALL:
3441
if (!kvm_ghcb_rax_is_valid(svm) ||
3442
!kvm_ghcb_cpl_is_valid(svm))
3443
goto vmgexit_err;
3444
break;
3445
case SVM_EXIT_RDTSCP:
3446
break;
3447
case SVM_EXIT_WBINVD:
3448
break;
3449
case SVM_EXIT_MONITOR:
3450
if (!kvm_ghcb_rax_is_valid(svm) ||
3451
!kvm_ghcb_rcx_is_valid(svm) ||
3452
!kvm_ghcb_rdx_is_valid(svm))
3453
goto vmgexit_err;
3454
break;
3455
case SVM_EXIT_MWAIT:
3456
if (!kvm_ghcb_rax_is_valid(svm) ||
3457
!kvm_ghcb_rcx_is_valid(svm))
3458
goto vmgexit_err;
3459
break;
3460
case SVM_VMGEXIT_MMIO_READ:
3461
case SVM_VMGEXIT_MMIO_WRITE:
3462
if (!kvm_ghcb_sw_scratch_is_valid(svm))
3463
goto vmgexit_err;
3464
break;
3465
case SVM_VMGEXIT_AP_CREATION:
3466
if (!sev_snp_guest(vcpu->kvm))
3467
goto vmgexit_err;
3468
if (lower_32_bits(control->exit_info_1) != SVM_VMGEXIT_AP_DESTROY)
3469
if (!kvm_ghcb_rax_is_valid(svm))
3470
goto vmgexit_err;
3471
break;
3472
case SVM_VMGEXIT_NMI_COMPLETE:
3473
case SVM_VMGEXIT_AP_HLT_LOOP:
3474
case SVM_VMGEXIT_AP_JUMP_TABLE:
3475
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
3476
case SVM_VMGEXIT_HV_FEATURES:
3477
case SVM_VMGEXIT_TERM_REQUEST:
3478
break;
3479
case SVM_VMGEXIT_PSC:
3480
if (!sev_snp_guest(vcpu->kvm) || !kvm_ghcb_sw_scratch_is_valid(svm))
3481
goto vmgexit_err;
3482
break;
3483
case SVM_VMGEXIT_GUEST_REQUEST:
3484
case SVM_VMGEXIT_EXT_GUEST_REQUEST:
3485
if (!sev_snp_guest(vcpu->kvm) ||
3486
!PAGE_ALIGNED(control->exit_info_1) ||
3487
!PAGE_ALIGNED(control->exit_info_2) ||
3488
control->exit_info_1 == control->exit_info_2)
3489
goto vmgexit_err;
3490
break;
3491
default:
3492
reason = GHCB_ERR_INVALID_EVENT;
3493
goto vmgexit_err;
3494
}
3495
3496
return 0;
3497
3498
vmgexit_err:
3499
if (reason == GHCB_ERR_INVALID_USAGE) {
3500
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
3501
svm->sev_es.ghcb->ghcb_usage);
3502
} else if (reason == GHCB_ERR_INVALID_EVENT) {
3503
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
3504
exit_code);
3505
} else {
3506
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
3507
exit_code);
3508
dump_ghcb(svm);
3509
}
3510
3511
svm_vmgexit_bad_input(svm, reason);
3512
3513
/* Resume the guest to "return" the error code. */
3514
return 1;
3515
}
3516
3517
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
3518
{
3519
/* Clear any indication that the vCPU is in a type of AP Reset Hold */
3520
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NONE;
3521
3522
if (!svm->sev_es.ghcb)
3523
return;
3524
3525
if (svm->sev_es.ghcb_sa_free) {
3526
/*
3527
* The scratch area lives outside the GHCB, so there is a
3528
* buffer that, depending on the operation performed, may
3529
* need to be synced, then freed.
3530
*/
3531
if (svm->sev_es.ghcb_sa_sync) {
3532
kvm_write_guest(svm->vcpu.kvm,
3533
svm->sev_es.sw_scratch,
3534
svm->sev_es.ghcb_sa,
3535
svm->sev_es.ghcb_sa_len);
3536
svm->sev_es.ghcb_sa_sync = false;
3537
}
3538
3539
kvfree(svm->sev_es.ghcb_sa);
3540
svm->sev_es.ghcb_sa = NULL;
3541
svm->sev_es.ghcb_sa_free = false;
3542
}
3543
3544
trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
3545
3546
sev_es_sync_to_ghcb(svm);
3547
3548
kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map);
3549
svm->sev_es.ghcb = NULL;
3550
}
3551
3552
int pre_sev_run(struct vcpu_svm *svm, int cpu)
3553
{
3554
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
3555
struct kvm *kvm = svm->vcpu.kvm;
3556
unsigned int asid = sev_get_asid(kvm);
3557
3558
/*
3559
* Reject KVM_RUN if userspace attempts to run the vCPU with an invalid
3560
* VMSA, e.g. if userspace forces the vCPU to be RUNNABLE after an SNP
3561
* AP Destroy event.
3562
*/
3563
if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa))
3564
return -EINVAL;
3565
3566
/*
3567
* To optimize cache flushes when memory is reclaimed from an SEV VM,
3568
* track physical CPUs that enter the guest for SEV VMs and thus can
3569
* have encrypted, dirty data in the cache, and flush caches only for
3570
* CPUs that have entered the guest.
3571
*/
3572
if (!cpumask_test_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus))
3573
cpumask_set_cpu(cpu, to_kvm_sev_info(kvm)->have_run_cpus);
3574
3575
/* Assign the asid allocated with this SEV guest */
3576
svm->asid = asid;
3577
3578
/*
3579
* Flush guest TLB:
3580
*
3581
* 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
3582
* 2) this VMCB was executed on a different host CPU in previous VMRUNs.
3583
*/
3584
if (sd->sev_vmcbs[asid] == svm->vmcb &&
3585
svm->vcpu.arch.last_vmentry_cpu == cpu)
3586
return 0;
3587
3588
sd->sev_vmcbs[asid] = svm->vmcb;
3589
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3590
vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
3591
return 0;
3592
}
3593
3594
#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
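/*
* Resolve the scratch area referenced by the GHCB sw_scratch field: either
* a slice of the GHCB's shared buffer (used in place), or a separate guest
* buffer that is copied into a kernel allocation capped at
* GHCB_SCRATCH_AREA_LIMIT and, if needed, written back when the GHCB is
* unmapped.
*/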
3595
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
3596
{
3597
struct vmcb_control_area *control = &svm->vmcb->control;
3598
u64 ghcb_scratch_beg, ghcb_scratch_end;
3599
u64 scratch_gpa_beg, scratch_gpa_end;
3600
void *scratch_va;
3601
3602
scratch_gpa_beg = svm->sev_es.sw_scratch;
3603
if (!scratch_gpa_beg) {
3604
pr_err("vmgexit: scratch gpa not provided\n");
3605
goto e_scratch;
3606
}
3607
3608
scratch_gpa_end = scratch_gpa_beg + len;
3609
if (scratch_gpa_end < scratch_gpa_beg) {
3610
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
3611
len, scratch_gpa_beg);
3612
goto e_scratch;
3613
}
3614
3615
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
3616
/* Scratch area begins within GHCB */
3617
ghcb_scratch_beg = control->ghcb_gpa +
3618
offsetof(struct ghcb, shared_buffer);
3619
ghcb_scratch_end = control->ghcb_gpa +
3620
offsetof(struct ghcb, reserved_0xff0);
3621
3622
/*
3623
* If the scratch area begins within the GHCB, it must be
3624
* completely contained in the GHCB shared buffer area.
3625
*/
3626
if (scratch_gpa_beg < ghcb_scratch_beg ||
3627
scratch_gpa_end > ghcb_scratch_end) {
3628
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
3629
scratch_gpa_beg, scratch_gpa_end);
3630
goto e_scratch;
3631
}
3632
3633
scratch_va = (void *)svm->sev_es.ghcb;
3634
scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
3635
} else {
3636
/*
3637
* The guest memory must be read into a kernel buffer, so
3638
* limit the size.
3639
*/
3640
if (len > GHCB_SCRATCH_AREA_LIMIT) {
3641
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
3642
len, GHCB_SCRATCH_AREA_LIMIT);
3643
goto e_scratch;
3644
}
3645
scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
3646
if (!scratch_va)
3647
return -ENOMEM;
3648
3649
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
3650
/* Unable to copy scratch area from guest */
3651
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
3652
3653
kvfree(scratch_va);
3654
return -EFAULT;
3655
}
3656
3657
/*
3658
* The scratch area is outside the GHCB. The operation will
3659
* dictate whether the buffer needs to be synced before running
3660
* the vCPU next time (i.e. a read was requested so the data
3661
* must be written back to the guest memory).
3662
*/
3663
svm->sev_es.ghcb_sa_sync = sync;
3664
svm->sev_es.ghcb_sa_free = true;
3665
}
3666
3667
svm->sev_es.ghcb_sa = scratch_va;
3668
svm->sev_es.ghcb_sa_len = len;
3669
3670
return 0;
3671
3672
e_scratch:
3673
svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_SCRATCH_AREA);
3674
3675
return 1;
3676
}
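/*
* The GHCB MSR protocol multiplexes requests and responses through the
* GHCB GPA field of the VMCB; these helpers read and write individual bit
* fields of that value.
*/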
3677
3678
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
3679
unsigned int pos)
3680
{
3681
svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
3682
svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
3683
}
3684
3685
static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
3686
{
3687
return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
3688
}
3689
3690
static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
3691
{
3692
svm->vmcb->control.ghcb_gpa = value;
3693
}
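/* Split the 2M RMP entry covering @pfn into individual 4K entries. */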
3694
3695
static int snp_rmptable_psmash(kvm_pfn_t pfn)
3696
{
3697
int ret;
3698
3699
pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
3700
3701
/*
3702
* PSMASH_FAIL_INUSE indicates another processor is modifying the
3703
* entry, so retry until that's no longer the case.
3704
*/
3705
do {
3706
ret = psmash(pfn);
3707
} while (ret == PSMASH_FAIL_INUSE);
3708
3709
return ret;
3710
}
3711
3712
static int snp_complete_psc_msr(struct kvm_vcpu *vcpu)
3713
{
3714
struct vcpu_svm *svm = to_svm(vcpu);
3715
3716
if (vcpu->run->hypercall.ret)
3717
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3718
else
3719
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP);
3720
3721
return 1; /* resume guest */
3722
}
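/*
* Handle a single-page Page State Change request issued via the GHCB MSR
* protocol by forwarding it to userspace as a KVM_HC_MAP_GPA_RANGE
* hypercall exit; the result is reported back through the GHCB MSR in
* snp_complete_psc_msr().
*/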
3723
3724
static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr)
3725
{
3726
u64 gpa = gfn_to_gpa(GHCB_MSR_PSC_REQ_TO_GFN(ghcb_msr));
3727
u8 op = GHCB_MSR_PSC_REQ_TO_OP(ghcb_msr);
3728
struct kvm_vcpu *vcpu = &svm->vcpu;
3729
3730
if (op != SNP_PAGE_STATE_PRIVATE && op != SNP_PAGE_STATE_SHARED) {
3731
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3732
return 1; /* resume guest */
3733
}
3734
3735
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
3736
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
3737
return 1; /* resume guest */
3738
}
3739
3740
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
3741
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
3742
/*
3743
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
3744
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
3745
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
3746
* vcpu->run->hypercall.ret, ensure that it is zero to not break QEMU.
3747
*/
3748
vcpu->run->hypercall.ret = 0;
3749
vcpu->run->hypercall.args[0] = gpa;
3750
vcpu->run->hypercall.args[1] = 1;
3751
vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE)
3752
? KVM_MAP_GPA_RANGE_ENCRYPTED
3753
: KVM_MAP_GPA_RANGE_DECRYPTED;
3754
vcpu->run->hypercall.args[2] |= KVM_MAP_GPA_RANGE_PAGE_SZ_4K;
3755
3756
vcpu->arch.complete_userspace_io = snp_complete_psc_msr;
3757
3758
return 0; /* forward request to userspace */
3759
}
3760
3761
struct psc_buffer {
3762
struct psc_hdr hdr;
3763
struct psc_entry entries[];
3764
} __packed;
3765
3766
static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc);
3767
3768
static void snp_complete_psc(struct vcpu_svm *svm, u64 psc_ret)
3769
{
3770
svm->sev_es.psc_inflight = 0;
3771
svm->sev_es.psc_idx = 0;
3772
svm->sev_es.psc_2m = false;
3773
3774
/*
3775
* PSC requests always get a "no action" response in SW_EXITINFO1, with
3776
* a PSC-specific return code in SW_EXITINFO2 that provides the "real"
3777
* return code. E.g. if the PSC request was interrupted, the need to
3778
* retry is communicated via SW_EXITINFO2, not SW_EXITINFO1.
3779
*/
3780
svm_vmgexit_no_action(svm, psc_ret);
3781
}
3782
3783
static void __snp_complete_one_psc(struct vcpu_svm *svm)
3784
{
3785
struct psc_buffer *psc = svm->sev_es.ghcb_sa;
3786
struct psc_entry *entries = psc->entries;
3787
struct psc_hdr *hdr = &psc->hdr;
3788
__u16 idx;
3789
3790
/*
3791
* Everything in-flight has been processed successfully. Update the
3792
* corresponding entries in the guest's PSC buffer and zero out the
3793
* count of in-flight PSC entries.
3794
*/
3795
for (idx = svm->sev_es.psc_idx; svm->sev_es.psc_inflight;
3796
svm->sev_es.psc_inflight--, idx++) {
3797
struct psc_entry *entry = &entries[idx];
3798
3799
entry->cur_page = entry->pagesize ? 512 : 1;
3800
}
3801
3802
hdr->cur_entry = idx;
3803
}
3804
3805
static int snp_complete_one_psc(struct kvm_vcpu *vcpu)
3806
{
3807
struct vcpu_svm *svm = to_svm(vcpu);
3808
struct psc_buffer *psc = svm->sev_es.ghcb_sa;
3809
3810
if (vcpu->run->hypercall.ret) {
3811
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3812
return 1; /* resume guest */
3813
}
3814
3815
__snp_complete_one_psc(svm);
3816
3817
/* Handle the next range (if any). */
3818
return snp_begin_psc(svm, psc);
3819
}
3820
3821
static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
3822
{
3823
struct psc_entry *entries = psc->entries;
3824
struct kvm_vcpu *vcpu = &svm->vcpu;
3825
struct psc_hdr *hdr = &psc->hdr;
3826
struct psc_entry entry_start;
3827
u16 idx, idx_start, idx_end;
3828
int npages;
3829
bool huge;
3830
u64 gfn;
3831
3832
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
3833
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3834
return 1;
3835
}
3836
3837
next_range:
3838
/* There should be no other PSCs in-flight at this point. */
3839
if (WARN_ON_ONCE(svm->sev_es.psc_inflight)) {
3840
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
3841
return 1;
3842
}
3843
3844
/*
3845
* The PSC descriptor buffer can be modified by a misbehaved guest after
3846
* validation, so take care to only use validated copies of values used
3847
* for things like array indexing.
3848
*/
3849
idx_start = hdr->cur_entry;
3850
idx_end = hdr->end_entry;
3851
3852
if (idx_end >= VMGEXIT_PSC_MAX_COUNT) {
3853
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_HDR);
3854
return 1;
3855
}
3856
3857
/* Find the start of the next range which needs processing. */
3858
for (idx = idx_start; idx <= idx_end; idx++, hdr->cur_entry++) {
3859
entry_start = entries[idx];
3860
3861
gfn = entry_start.gfn;
3862
huge = entry_start.pagesize;
3863
npages = huge ? 512 : 1;
3864
3865
if (entry_start.cur_page > npages || !IS_ALIGNED(gfn, npages)) {
3866
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_INVALID_ENTRY);
3867
return 1;
3868
}
3869
3870
if (entry_start.cur_page) {
3871
/*
3872
* If this is a partially-completed 2M range, force 4K handling
3873
* for the remaining pages since they're effectively split at
3874
* this point. Subsequent code should ensure this doesn't get
3875
* combined with adjacent PSC entries where 2M handling is still
3876
* possible.
3877
*/
3878
npages -= entry_start.cur_page;
3879
gfn += entry_start.cur_page;
3880
huge = false;
3881
}
3882
3883
if (npages)
3884
break;
3885
}
3886
3887
if (idx > idx_end) {
3888
/* Nothing more to process. */
3889
snp_complete_psc(svm, 0);
3890
return 1;
3891
}
3892
3893
svm->sev_es.psc_2m = huge;
3894
svm->sev_es.psc_idx = idx;
3895
svm->sev_es.psc_inflight = 1;
3896
3897
/*
3898
* Find all subsequent PSC entries that contain adjacent GPA
3899
* ranges/operations and can be combined into a single
3900
* KVM_HC_MAP_GPA_RANGE exit.
3901
*/
3902
while (++idx <= idx_end) {
3903
struct psc_entry entry = entries[idx];
3904
3905
if (entry.operation != entry_start.operation ||
3906
entry.gfn != entry_start.gfn + npages ||
3907
entry.cur_page || !!entry.pagesize != huge)
3908
break;
3909
3910
svm->sev_es.psc_inflight++;
3911
npages += huge ? 512 : 1;
3912
}
3913
3914
switch (entry_start.operation) {
3915
case VMGEXIT_PSC_OP_PRIVATE:
3916
case VMGEXIT_PSC_OP_SHARED:
3917
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
3918
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
3919
/*
3920
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
3921
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
3922
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
3923
* vcpu->run->hypercall.ret, ensure that it is zero so as not to break QEMU.
3924
*/
3925
vcpu->run->hypercall.ret = 0;
3926
vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn);
3927
vcpu->run->hypercall.args[1] = npages;
3928
vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE
3929
? KVM_MAP_GPA_RANGE_ENCRYPTED
3930
: KVM_MAP_GPA_RANGE_DECRYPTED;
3931
vcpu->run->hypercall.args[2] |= entry_start.pagesize
3932
? KVM_MAP_GPA_RANGE_PAGE_SZ_2M
3933
: KVM_MAP_GPA_RANGE_PAGE_SZ_4K;
3934
vcpu->arch.complete_userspace_io = snp_complete_one_psc;
3935
return 0; /* forward request to userspace */
3936
default:
3937
/*
3938
* Only shared/private PSC operations are currently supported, so if the
3939
* entire range consists of unsupported operations (e.g. SMASH/UNSMASH),
3940
* then consider the entire range completed and avoid exiting to
3941
* userspace. In theory snp_complete_psc() can always be called directly
3942
* at this point to complete the current range and start the next one,
3943
* but that could lead to unexpected levels of recursion.
3944
*/
3945
__snp_complete_one_psc(svm);
3946
goto next_range;
3947
}
3948
3949
BUG();
3950
}
3951
3952
/*
3953
* Invoked as part of svm_vcpu_reset() processing of an init event.
3954
*/
3955
static void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
3956
{
3957
struct vcpu_svm *svm = to_svm(vcpu);
3958
struct kvm_memory_slot *slot;
3959
struct page *page;
3960
kvm_pfn_t pfn;
3961
gfn_t gfn;
3962
3963
guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
3964
3965
if (!svm->sev_es.snp_ap_waiting_for_reset)
3966
return;
3967
3968
svm->sev_es.snp_ap_waiting_for_reset = false;
3969
3970
/* Mark the vCPU as offline and not runnable */
3971
vcpu->arch.pv.pv_unhalted = false;
3972
kvm_set_mp_state(vcpu, KVM_MP_STATE_HALTED);
3973
3974
/* Clear use of the VMSA */
3975
svm->vmcb->control.vmsa_pa = INVALID_PAGE;
3976
3977
/*
3978
* When replacing the VMSA during SEV-SNP AP creation,
3979
* mark the VMCB dirty so that full state is always reloaded.
3980
*/
3981
vmcb_mark_all_dirty(svm->vmcb);
3982
3983
if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa))
3984
return;
3985
3986
gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
3987
svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
3988
3989
slot = gfn_to_memslot(vcpu->kvm, gfn);
3990
if (!slot)
3991
return;
3992
3993
/*
3994
* The new VMSA will be private guest memory (gmem), so retrieve the
3995
* PFN from the gmem backend.
3996
*/
3997
if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
3998
return;
3999
4000
/*
4001
* From this point forward, the VMSA will always be a guest-mapped page
4002
* rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
4003
* theory, svm->sev_es.vmsa could be free'd and cleaned up here, but
4004
* that involves cleanups like flushing caches, which would ideally be
4005
* handled during teardown rather than guest boot. Deferring that also
4006
* allows the existing logic for SEV-ES VMSAs to be re-used with
4007
* minimal SNP-specific changes.
4008
*/
4009
svm->sev_es.snp_has_guest_vmsa = true;
4010
4011
/* Use the new VMSA */
4012
svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
4013
4014
/* Mark the vCPU as runnable */
4015
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
4016
4017
/*
4018
* gmem pages aren't currently migratable, but if this ever changes
4019
* then care should be taken to ensure svm->sev_es.vmsa is pinned
4020
* through some other means.
4021
*/
4022
kvm_release_page_clean(page);
4023
}
4024
4025
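/*
* Handle an SVM_VMGEXIT_AP_CREATION request: validate the target APIC ID and,
* for CREATE requests, the new VMSA GPA, then flag the target vCPU so its
* protected guest state is updated on the next INIT/reset processing.
*/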
static int sev_snp_ap_creation(struct vcpu_svm *svm)
4026
{
4027
struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
4028
struct kvm_vcpu *vcpu = &svm->vcpu;
4029
struct kvm_vcpu *target_vcpu;
4030
struct vcpu_svm *target_svm;
4031
unsigned int request;
4032
unsigned int apic_id;
4033
4034
request = lower_32_bits(svm->vmcb->control.exit_info_1);
4035
apic_id = upper_32_bits(svm->vmcb->control.exit_info_1);
4036
4037
/* Validate the APIC ID */
4038
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, apic_id);
4039
if (!target_vcpu) {
4040
vcpu_unimpl(vcpu, "vmgexit: invalid AP APIC ID [%#x] from guest\n",
4041
apic_id);
4042
return -EINVAL;
4043
}
4044
4045
target_svm = to_svm(target_vcpu);
4046
4047
guard(mutex)(&target_svm->sev_es.snp_vmsa_mutex);
4048
4049
switch (request) {
4050
case SVM_VMGEXIT_AP_CREATE_ON_INIT:
4051
case SVM_VMGEXIT_AP_CREATE:
4052
if (vcpu->arch.regs[VCPU_REGS_RAX] != sev->vmsa_features) {
4053
vcpu_unimpl(vcpu, "vmgexit: mismatched AP sev_features [%#lx] != [%#llx] from guest\n",
4054
vcpu->arch.regs[VCPU_REGS_RAX], sev->vmsa_features);
4055
return -EINVAL;
4056
}
4057
4058
if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) {
4059
vcpu_unimpl(vcpu, "vmgexit: invalid AP VMSA address [%#llx] from guest\n",
4060
svm->vmcb->control.exit_info_2);
4061
return -EINVAL;
4062
}
4063
4064
/*
4065
* A malicious guest can RMPADJUST a large page into a VMSA, which
4066
* hits the SNP erratum where the CPU incorrectly signals an RMP
4067
* violation #PF if a hugepage collides with the RMP entry of the
4068
* VMSA page. Reject the AP CREATE request if the VMSA address from
4069
* the guest is 2M-aligned.
4070
*/
4071
if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) {
4072
vcpu_unimpl(vcpu,
4073
"vmgexit: AP VMSA address [%llx] from guest is unsafe as it is 2M aligned\n",
4074
svm->vmcb->control.exit_info_2);
4075
return -EINVAL;
4076
}
4077
4078
target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
4079
break;
4080
case SVM_VMGEXIT_AP_DESTROY:
4081
target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
4082
break;
4083
default:
4084
vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
4085
request);
4086
return -EINVAL;
4087
}
4088
4089
target_svm->sev_es.snp_ap_waiting_for_reset = true;
4090
4091
/*
4092
* Unless Creation is deferred until INIT, signal the vCPU to update
4093
* its state.
4094
*/
4095
if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT)
4096
kvm_make_request_and_kick(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
4097
4098
return 0;
4099
}
4100
4101
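/*
* Forward an SNP guest request to firmware using the per-VM request/response
* buffers, and propagate the firmware return code back to the guest.
*/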
static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
4102
{
4103
struct sev_data_snp_guest_request data = {0};
4104
struct kvm *kvm = svm->vcpu.kvm;
4105
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
4106
sev_ret_code fw_err = 0;
4107
int ret;
4108
4109
if (!sev_snp_guest(kvm))
4110
return -EINVAL;
4111
4112
mutex_lock(&sev->guest_req_mutex);
4113
4114
if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) {
4115
ret = -EIO;
4116
goto out_unlock;
4117
}
4118
4119
data.gctx_paddr = __psp_pa(sev->snp_context);
4120
data.req_paddr = __psp_pa(sev->guest_req_buf);
4121
data.res_paddr = __psp_pa(sev->guest_resp_buf);
4122
4123
/*
4124
* Firmware failures are propagated on to the guest, but any other failure
4125
* condition along the way should be reported to userspace. E.g. if
4126
* the PSP is dead and commands are timing out.
4127
*/
4128
ret = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, &fw_err);
4129
if (ret && !fw_err)
4130
goto out_unlock;
4131
4132
if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) {
4133
ret = -EIO;
4134
goto out_unlock;
4135
}
4136
4137
/* No action is requested *from KVM* if there was a firmware error. */
4138
svm_vmgexit_no_action(svm, SNP_GUEST_ERR(0, fw_err));
4139
4140
ret = 1; /* resume guest */
4141
4142
out_unlock:
4143
mutex_unlock(&sev->guest_req_mutex);
4144
return ret;
4145
}
4146
4147
static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
4148
{
4149
struct kvm *kvm = svm->vcpu.kvm;
4150
u8 msg_type;
4151
4152
if (!sev_snp_guest(kvm))
4153
return -EINVAL;
4154
4155
if (kvm_read_guest(kvm, req_gpa + offsetof(struct snp_guest_msg_hdr, msg_type),
4156
&msg_type, 1))
4157
return -EIO;
4158
4159
/*
4160
* As per GHCB spec, requests of type MSG_REPORT_REQ also allow for
4161
* additional certificate data to be provided alongside the attestation
4162
* report via the guest-provided data pages indicated by RAX/RBX. The
4163
* certificate data is optional and requires additional KVM enablement
4164
* to expose an interface for userspace to supply it, but KVM still
4165
* needs to be able to handle extended guest requests either way. So
4166
* provide a stub implementation that will always return an empty
4167
* certificate table in the guest-provided data pages.
4168
*/
4169
if (msg_type == SNP_MSG_REPORT_REQ) {
4170
struct kvm_vcpu *vcpu = &svm->vcpu;
4171
u64 data_npages;
4172
gpa_t data_gpa;
4173
4174
if (!kvm_ghcb_rax_is_valid(svm) || !kvm_ghcb_rbx_is_valid(svm))
4175
goto request_invalid;
4176
4177
data_gpa = vcpu->arch.regs[VCPU_REGS_RAX];
4178
data_npages = vcpu->arch.regs[VCPU_REGS_RBX];
4179
4180
if (!PAGE_ALIGNED(data_gpa))
4181
goto request_invalid;
4182
4183
/*
4184
* As per GHCB spec (see "SNP Extended Guest Request"), the
4185
* certificate table is terminated by 24-bytes of zeroes.
4186
*/
4187
if (data_npages && kvm_clear_guest(kvm, data_gpa, 24))
4188
return -EIO;
4189
}
4190
4191
return snp_handle_guest_req(svm, req_gpa, resp_gpa);
4192
4193
request_invalid:
4194
svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
4195
return 1; /* resume guest */
4196
}
4197
4198
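/*
* Handle VMGEXIT requests made via the GHCB MSR protocol, i.e. requests
* encoded directly in the GHCB GPA MSR rather than in a mapped GHCB page.
*/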
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
4199
{
4200
struct vmcb_control_area *control = &svm->vmcb->control;
4201
struct kvm_vcpu *vcpu = &svm->vcpu;
4202
struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
4203
u64 ghcb_info;
4204
int ret = 1;
4205
4206
ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
4207
4208
trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
4209
control->ghcb_gpa);
4210
4211
switch (ghcb_info) {
4212
case GHCB_MSR_SEV_INFO_REQ:
4213
set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
4214
GHCB_VERSION_MIN,
4215
sev_enc_bit));
4216
break;
4217
case GHCB_MSR_CPUID_REQ: {
4218
u64 cpuid_fn, cpuid_reg, cpuid_value;
4219
4220
cpuid_fn = get_ghcb_msr_bits(svm,
4221
GHCB_MSR_CPUID_FUNC_MASK,
4222
GHCB_MSR_CPUID_FUNC_POS);
4223
4224
/* Initialize the registers needed by the CPUID intercept */
4225
vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
4226
vcpu->arch.regs[VCPU_REGS_RCX] = 0;
4227
4228
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
4229
if (!ret) {
4230
/* Error, keep GHCB MSR value as-is */
4231
break;
4232
}
4233
4234
cpuid_reg = get_ghcb_msr_bits(svm,
4235
GHCB_MSR_CPUID_REG_MASK,
4236
GHCB_MSR_CPUID_REG_POS);
4237
if (cpuid_reg == 0)
4238
cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
4239
else if (cpuid_reg == 1)
4240
cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
4241
else if (cpuid_reg == 2)
4242
cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
4243
else
4244
cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
4245
4246
set_ghcb_msr_bits(svm, cpuid_value,
4247
GHCB_MSR_CPUID_VALUE_MASK,
4248
GHCB_MSR_CPUID_VALUE_POS);
4249
4250
set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
4251
GHCB_MSR_INFO_MASK,
4252
GHCB_MSR_INFO_POS);
4253
break;
4254
}
4255
case GHCB_MSR_AP_RESET_HOLD_REQ:
4256
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_MSR_PROTO;
4257
ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
4258
4259
/*
4260
* Preset the result to a non-SIPI return and then only set
4261
* the result to non-zero when delivering a SIPI.
4262
*/
4263
set_ghcb_msr_bits(svm, 0,
4264
GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
4265
GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
4266
4267
set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
4268
GHCB_MSR_INFO_MASK,
4269
GHCB_MSR_INFO_POS);
4270
break;
4271
case GHCB_MSR_HV_FT_REQ:
4272
set_ghcb_msr_bits(svm, GHCB_HV_FT_SUPPORTED,
4273
GHCB_MSR_HV_FT_MASK, GHCB_MSR_HV_FT_POS);
4274
set_ghcb_msr_bits(svm, GHCB_MSR_HV_FT_RESP,
4275
GHCB_MSR_INFO_MASK, GHCB_MSR_INFO_POS);
4276
break;
4277
case GHCB_MSR_PREF_GPA_REQ:
4278
if (!sev_snp_guest(vcpu->kvm))
4279
goto out_terminate;
4280
4281
set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_NONE, GHCB_MSR_GPA_VALUE_MASK,
4282
GHCB_MSR_GPA_VALUE_POS);
4283
set_ghcb_msr_bits(svm, GHCB_MSR_PREF_GPA_RESP, GHCB_MSR_INFO_MASK,
4284
GHCB_MSR_INFO_POS);
4285
break;
4286
case GHCB_MSR_REG_GPA_REQ: {
4287
u64 gfn;
4288
4289
if (!sev_snp_guest(vcpu->kvm))
4290
goto out_terminate;
4291
4292
gfn = get_ghcb_msr_bits(svm, GHCB_MSR_GPA_VALUE_MASK,
4293
GHCB_MSR_GPA_VALUE_POS);
4294
4295
svm->sev_es.ghcb_registered_gpa = gfn_to_gpa(gfn);
4296
4297
set_ghcb_msr_bits(svm, gfn, GHCB_MSR_GPA_VALUE_MASK,
4298
GHCB_MSR_GPA_VALUE_POS);
4299
set_ghcb_msr_bits(svm, GHCB_MSR_REG_GPA_RESP, GHCB_MSR_INFO_MASK,
4300
GHCB_MSR_INFO_POS);
4301
break;
4302
}
4303
case GHCB_MSR_PSC_REQ:
4304
if (!sev_snp_guest(vcpu->kvm))
4305
goto out_terminate;
4306
4307
ret = snp_begin_psc_msr(svm, control->ghcb_gpa);
4308
break;
4309
case GHCB_MSR_TERM_REQ: {
4310
u64 reason_set, reason_code;
4311
4312
reason_set = get_ghcb_msr_bits(svm,
4313
GHCB_MSR_TERM_REASON_SET_MASK,
4314
GHCB_MSR_TERM_REASON_SET_POS);
4315
reason_code = get_ghcb_msr_bits(svm,
4316
GHCB_MSR_TERM_REASON_MASK,
4317
GHCB_MSR_TERM_REASON_POS);
4318
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
4319
reason_set, reason_code);
4320
4321
goto out_terminate;
4322
}
4323
default:
4324
/* Error, keep GHCB MSR value as-is */
4325
break;
4326
}
4327
4328
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
4329
control->ghcb_gpa, ret);
4330
4331
return ret;
4332
4333
out_terminate:
4334
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
4335
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
4336
vcpu->run->system_event.ndata = 1;
4337
vcpu->run->system_event.data[0] = control->ghcb_gpa;
4338
4339
return 0;
4340
}
4341
4342
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
4343
{
4344
struct vcpu_svm *svm = to_svm(vcpu);
4345
struct vmcb_control_area *control = &svm->vmcb->control;
4346
u64 ghcb_gpa, exit_code;
4347
int ret;
4348
4349
/* Validate the GHCB */
4350
ghcb_gpa = control->ghcb_gpa;
4351
if (ghcb_gpa & GHCB_MSR_INFO_MASK)
4352
return sev_handle_vmgexit_msr_protocol(svm);
4353
4354
if (!ghcb_gpa) {
4355
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
4356
4357
/* Without a GHCB, just return right back to the guest */
4358
return 1;
4359
}
4360
4361
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
4362
/* Unable to map GHCB from guest */
4363
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
4364
ghcb_gpa);
4365
4366
/* Without a GHCB, just return right back to the guest */
4367
return 1;
4368
}
4369
4370
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
4371
4372
trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
4373
4374
sev_es_sync_from_ghcb(svm);
4375
4376
/* An SEV-SNP guest requires that the GHCB GPA be registered */
4377
if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) {
4378
vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa);
4379
return -EINVAL;
4380
}
4381
4382
ret = sev_es_validate_vmgexit(svm);
4383
if (ret)
4384
return ret;
4385
4386
svm_vmgexit_success(svm, 0);
4387
4388
exit_code = kvm_get_cached_sw_exit_code(control);
4389
switch (exit_code) {
4390
case SVM_VMGEXIT_MMIO_READ:
4391
ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
4392
if (ret)
4393
break;
4394
4395
ret = kvm_sev_es_mmio_read(vcpu,
4396
control->exit_info_1,
4397
control->exit_info_2,
4398
svm->sev_es.ghcb_sa);
4399
break;
4400
case SVM_VMGEXIT_MMIO_WRITE:
4401
ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
4402
if (ret)
4403
break;
4404
4405
ret = kvm_sev_es_mmio_write(vcpu,
4406
control->exit_info_1,
4407
control->exit_info_2,
4408
svm->sev_es.ghcb_sa);
4409
break;
4410
case SVM_VMGEXIT_NMI_COMPLETE:
4411
++vcpu->stat.nmi_window_exits;
4412
svm->nmi_masked = false;
4413
kvm_make_request(KVM_REQ_EVENT, vcpu);
4414
ret = 1;
4415
break;
4416
case SVM_VMGEXIT_AP_HLT_LOOP:
4417
svm->sev_es.ap_reset_hold_type = AP_RESET_HOLD_NAE_EVENT;
4418
ret = kvm_emulate_ap_reset_hold(vcpu);
4419
break;
4420
case SVM_VMGEXIT_AP_JUMP_TABLE: {
4421
struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
4422
4423
switch (control->exit_info_1) {
4424
case 0:
4425
/* Set AP jump table address */
4426
sev->ap_jump_table = control->exit_info_2;
4427
break;
4428
case 1:
4429
/* Get AP jump table address */
4430
svm_vmgexit_success(svm, sev->ap_jump_table);
4431
break;
4432
default:
4433
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
4434
control->exit_info_1);
4435
svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
4436
}
4437
4438
ret = 1;
4439
break;
4440
}
4441
case SVM_VMGEXIT_HV_FEATURES:
4442
svm_vmgexit_success(svm, GHCB_HV_FT_SUPPORTED);
4443
ret = 1;
4444
break;
4445
case SVM_VMGEXIT_TERM_REQUEST:
4446
pr_info("SEV-ES guest requested termination: reason %#llx info %#llx\n",
4447
control->exit_info_1, control->exit_info_2);
4448
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
4449
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
4450
vcpu->run->system_event.ndata = 1;
4451
vcpu->run->system_event.data[0] = control->ghcb_gpa;
4452
break;
4453
case SVM_VMGEXIT_PSC:
4454
ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
4455
if (ret)
4456
break;
4457
4458
ret = snp_begin_psc(svm, svm->sev_es.ghcb_sa);
4459
break;
4460
case SVM_VMGEXIT_AP_CREATION:
4461
ret = sev_snp_ap_creation(svm);
4462
if (ret)
4463
svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
4464
4465
4466
ret = 1;
4467
break;
4468
case SVM_VMGEXIT_GUEST_REQUEST:
4469
ret = snp_handle_guest_req(svm, control->exit_info_1, control->exit_info_2);
4470
break;
4471
case SVM_VMGEXIT_EXT_GUEST_REQUEST:
4472
ret = snp_handle_ext_guest_req(svm, control->exit_info_1, control->exit_info_2);
4473
break;
4474
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
4475
vcpu_unimpl(vcpu,
4476
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
4477
control->exit_info_1, control->exit_info_2);
4478
ret = -EINVAL;
4479
break;
4480
default:
4481
ret = svm_invoke_exit_handler(vcpu, exit_code);
4482
}
4483
4484
return ret;
4485
}
4486
4487
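/*
* Set up the GHCB scratch area for a string I/O operation and emulate the
* I/O using that shared buffer.
*/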
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
4488
{
4489
int count;
4490
int bytes;
4491
int r;
4492
4493
if (svm->vmcb->control.exit_info_2 > INT_MAX)
4494
return -EINVAL;
4495
4496
count = svm->vmcb->control.exit_info_2;
4497
if (unlikely(check_mul_overflow(count, size, &bytes)))
4498
return -EINVAL;
4499
4500
r = setup_vmgexit_scratch(svm, in, bytes);
4501
if (r)
4502
return r;
4503
4504
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
4505
count, in);
4506
}
4507
4508
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
4509
{
4510
/* Clear intercepts on MSRs that are context switched by hardware. */
4511
svm_disable_intercept_for_msr(vcpu, MSR_AMD64_SEV_ES_GHCB, MSR_TYPE_RW);
4512
svm_disable_intercept_for_msr(vcpu, MSR_EFER, MSR_TYPE_RW);
4513
svm_disable_intercept_for_msr(vcpu, MSR_IA32_CR_PAT, MSR_TYPE_RW);
4514
4515
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX))
4516
svm_set_intercept_for_msr(vcpu, MSR_TSC_AUX, MSR_TYPE_RW,
4517
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
4518
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID));
4519
4520
svm_set_intercept_for_msr(vcpu, MSR_AMD64_GUEST_TSC_FREQ, MSR_TYPE_R,
4521
!snp_is_secure_tsc_enabled(vcpu->kvm));
4522
4523
/*
4524
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
4525
* the host/guest supports its use.
4526
*
4527
* KVM treats the guest as being capable of using XSAVES even if XSAVES
4528
* isn't enabled in guest CPUID as there is no intercept for XSAVES,
4529
* i.e. the guest can use XSAVES/XRSTOR to read/write XSS if XSAVE is
4530
* exposed to the guest and XSAVES is supported in hardware. Condition
4531
* full XSS passthrough on the guest being able to use XSAVES *and*
4532
* XSAVES being exposed to the guest so that KVM can at least honor
4533
* guest CPUID for RDMSR and WRMSR.
4534
*/
4535
svm_set_intercept_for_msr(vcpu, MSR_IA32_XSS, MSR_TYPE_RW,
4536
!guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) ||
4537
!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES));
4538
}
4539
4540
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
4541
{
4542
struct kvm_vcpu *vcpu = &svm->vcpu;
4543
struct kvm_cpuid_entry2 *best;
4544
4545
/* For SEV guests, the memory encryption bit is not reserved in CR3. */
4546
best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
4547
if (best)
4548
vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
4549
}
4550
4551
static void sev_es_init_vmcb(struct vcpu_svm *svm, bool init_event)
4552
{
4553
struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
4554
struct vmcb *vmcb = svm->vmcb01.ptr;
4555
4556
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
4557
4558
/*
4559
* An SEV-ES guest requires a VMSA area that is separate from the
4560
* VMCB page. Do not include the encryption mask on the VMSA physical
4561
* address since hardware will access it using the guest key. Note,
4562
* the VMSA will be NULL if this vCPU is the destination for intrahost
4563
* migration, and will be copied later.
4564
*/
4565
if (!svm->sev_es.snp_has_guest_vmsa) {
4566
if (svm->sev_es.vmsa)
4567
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
4568
else
4569
svm->vmcb->control.vmsa_pa = INVALID_PAGE;
4570
}
4571
4572
if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES))
4573
svm->vmcb->control.allowed_sev_features = sev->vmsa_features |
4574
VMCB_ALLOWED_SEV_FEATURES_VALID;
4575
4576
/* Can't intercept CR register access, HV can't modify CR registers */
4577
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
4578
svm_clr_intercept(svm, INTERCEPT_CR4_READ);
4579
svm_clr_intercept(svm, INTERCEPT_CR8_READ);
4580
svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
4581
svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
4582
svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
4583
4584
svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
4585
4586
/* Track EFER/CR register changes */
4587
svm_set_intercept(svm, TRAP_EFER_WRITE);
4588
svm_set_intercept(svm, TRAP_CR0_WRITE);
4589
svm_set_intercept(svm, TRAP_CR4_WRITE);
4590
svm_set_intercept(svm, TRAP_CR8_WRITE);
4591
4592
vmcb->control.intercepts[INTERCEPT_DR] = 0;
4593
if (!sev_vcpu_has_debug_swap(svm)) {
4594
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
4595
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
4596
recalc_intercepts(svm);
4597
} else {
4598
/*
4599
* Disable #DB intercept iff DebugSwap is enabled. KVM doesn't
4600
* allow debugging SEV-ES guests, and enables DebugSwap iff
4601
* NO_NESTED_DATA_BP is supported, so there's no reason to
4602
* intercept #DB when DebugSwap is enabled. For simplicity
4603
* with respect to guest debug, intercept #DB for other VMs
4604
* even if NO_NESTED_DATA_BP is supported, i.e. even if the
4605
* guest can't DoS the CPU with infinite #DB vectoring.
4606
*/
4607
clr_exception_intercept(svm, DB_VECTOR);
4608
}
4609
4610
/* Can't intercept XSETBV, HV can't modify XCR0 directly */
4611
svm_clr_intercept(svm, INTERCEPT_XSETBV);
4612
4613
/*
4614
* Set the GHCB MSR value as per the GHCB specification when emulating
4615
* vCPU RESET for an SEV-ES guest.
4616
*/
4617
if (!init_event)
4618
set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
4619
GHCB_VERSION_MIN,
4620
sev_enc_bit));
4621
}
4622
4623
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event)
4624
{
4625
struct kvm_vcpu *vcpu = &svm->vcpu;
4626
4627
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
4628
clr_exception_intercept(svm, UD_VECTOR);
4629
4630
/*
4631
* Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
4632
* KVM can't decrypt guest memory to decode the faulting instruction.
4633
*/
4634
clr_exception_intercept(svm, GP_VECTOR);
4635
4636
if (init_event && sev_snp_guest(vcpu->kvm))
4637
sev_snp_init_protected_guest_state(vcpu);
4638
4639
if (sev_es_guest(vcpu->kvm))
4640
sev_es_init_vmcb(svm, init_event);
4641
}
4642
4643
int sev_vcpu_create(struct kvm_vcpu *vcpu)
4644
{
4645
struct vcpu_svm *svm = to_svm(vcpu);
4646
struct page *vmsa_page;
4647
4648
mutex_init(&svm->sev_es.snp_vmsa_mutex);
4649
4650
if (!sev_es_guest(vcpu->kvm))
4651
return 0;
4652
4653
/*
4654
* SEV-ES guests require a separate (from the VMCB) VMSA page used to
4655
* contain the encrypted register state of the guest.
4656
*/
4657
vmsa_page = snp_safe_alloc_page();
4658
if (!vmsa_page)
4659
return -ENOMEM;
4660
4661
svm->sev_es.vmsa = page_address(vmsa_page);
4662
4663
vcpu->arch.guest_tsc_protected = snp_is_secure_tsc_enabled(vcpu->kvm);
4664
4665
return 0;
4666
}
4667
4668
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
4669
{
4670
struct kvm *kvm = svm->vcpu.kvm;
4671
4672
/*
4673
* All host state for SEV-ES guests is categorized into three swap types
4674
* based on how it is handled by hardware during a world switch:
4675
*
4676
* A: VMRUN: Host state saved in host save area
4677
* VMEXIT: Host state loaded from host save area
4678
*
4679
* B: VMRUN: Host state _NOT_ saved in host save area
4680
* VMEXIT: Host state loaded from host save area
4681
*
4682
* C: VMRUN: Host state _NOT_ saved in host save area
4683
* VMEXIT: Host state initialized to default(reset) values
4684
*
4685
* Manually save type-B state, i.e. state that is loaded by VMEXIT but
4686
* isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
4687
* by common SVM code).
4688
*/
4689
hostsa->xcr0 = kvm_host.xcr0;
4690
hostsa->pkru = read_pkru();
4691
hostsa->xss = kvm_host.xss;
4692
4693
/*
4694
* If DebugSwap is enabled, debug registers are loaded but NOT saved by
4695
* the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
4696
* not save or load debug registers. Sadly, KVM can't prevent SNP
4697
* guests from lying about DebugSwap on secondary vCPUs, i.e. the
4698
* SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
4699
* the guest has actually enabled (or not!) in the VMSA.
4700
*
4701
* If DebugSwap is *possible*, save the masks so that they're restored
4702
* if the guest enables DebugSwap. But for the DRs themselves, do NOT
4703
* rely on the CPU to restore the host values; KVM will restore them as
4704
* needed in common code, via hw_breakpoint_restore(). Note, KVM does
4705
* NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
4706
* don't need to be restored per se, KVM just needs to ensure they are
4707
* loaded with the correct values *if* the CPU writes the MSRs.
4708
*/
4709
if (sev_vcpu_has_debug_swap(svm) ||
4710
(sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
4711
hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
4712
hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
4713
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
4714
hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
4715
}
4716
4717
/*
4718
* TSC_AUX is always virtualized for SEV-ES guests when the feature is
4719
* available, i.e. TSC_AUX is loaded on #VMEXIT from the host save area.
4720
* Set the save area to the current hardware value, i.e. the current
4721
* user return value, so that the correct value is restored on #VMEXIT.
4722
*/
4723
if (cpu_feature_enabled(X86_FEATURE_V_TSC_AUX) &&
4724
!WARN_ON_ONCE(tsc_aux_uret_slot < 0))
4725
hostsa->tsc_aux = kvm_get_user_return_msr(tsc_aux_uret_slot);
4726
}
4727
4728
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4729
{
4730
struct vcpu_svm *svm = to_svm(vcpu);
4731
4732
/* First SIPI: Use the values as initially set by the VMM */
4733
if (!svm->sev_es.received_first_sipi) {
4734
svm->sev_es.received_first_sipi = true;
4735
return;
4736
}
4737
4738
/* Subsequent SIPI */
4739
switch (svm->sev_es.ap_reset_hold_type) {
4740
case AP_RESET_HOLD_NAE_EVENT:
4741
/*
4742
* Return from an AP Reset Hold VMGEXIT, where the guest will
4743
* set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
4744
*/
4745
svm_vmgexit_success(svm, 1);
4746
break;
4747
case AP_RESET_HOLD_MSR_PROTO:
4748
/*
4749
* Return from an AP Reset Hold VMGEXIT, where the guest will
4750
* set the CS and RIP. Set GHCB data field to a non-zero value.
4751
*/
4752
set_ghcb_msr_bits(svm, 1,
4753
GHCB_MSR_AP_RESET_HOLD_RESULT_MASK,
4754
GHCB_MSR_AP_RESET_HOLD_RESULT_POS);
4755
4756
set_ghcb_msr_bits(svm, GHCB_MSR_AP_RESET_HOLD_RESP,
4757
GHCB_MSR_INFO_MASK,
4758
GHCB_MSR_INFO_POS);
4759
break;
4760
default:
4761
break;
4762
}
4763
}
4764
4765
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
4766
{
4767
unsigned long pfn;
4768
struct page *p;
4769
4770
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
4771
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
4772
4773
/*
4774
* Allocate an SNP-safe page to work around the SNP erratum where
4775
* the CPU will incorrectly signal an RMP violation #PF if a
4776
* hugepage (2MB or 1GB) collides with the RMP entry of a
4777
* 2MB-aligned VMCB, VMSA, or AVIC backing page.
4778
*
4779
* Allocate one extra page, choose a page which is not
4780
* 2MB-aligned, and free the other.
4781
*/
4782
p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
4783
if (!p)
4784
return NULL;
4785
4786
split_page(p, 1);
4787
4788
pfn = page_to_pfn(p);
4789
if (IS_ALIGNED(pfn, PTRS_PER_PMD))
4790
__free_page(p++);
4791
else
4792
__free_page(p + 1);
4793
4794
return p;
4795
}
4796
4797
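/*
* Handle an #NPF with the RMP bit set by PSMASHing the 2M RMP entry backing
* the faulting private GPA (if needed) and zapping the corresponding NPT
* range.
*/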
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
4798
{
4799
struct kvm_memory_slot *slot;
4800
struct kvm *kvm = vcpu->kvm;
4801
int order, rmp_level, ret;
4802
struct page *page;
4803
bool assigned;
4804
kvm_pfn_t pfn;
4805
gfn_t gfn;
4806
4807
gfn = gpa >> PAGE_SHIFT;
4808
4809
/*
4810
* The only time RMP faults occur for shared pages is when the guest is
4811
* triggering an RMP fault for an implicit page-state change from
4812
* shared->private. Implicit page-state changes are forwarded to
4813
* userspace via KVM_EXIT_MEMORY_FAULT events, however, so RMP faults
4814
* for shared pages should not end up here.
4815
*/
4816
if (!kvm_mem_is_private(kvm, gfn)) {
4817
pr_warn_ratelimited("SEV: Unexpected RMP fault for non-private GPA 0x%llx\n",
4818
gpa);
4819
return;
4820
}
4821
4822
slot = gfn_to_memslot(kvm, gfn);
4823
if (!kvm_slot_has_gmem(slot)) {
4824
pr_warn_ratelimited("SEV: Unexpected RMP fault, non-private slot for GPA 0x%llx\n",
4825
gpa);
4826
return;
4827
}
4828
4829
ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &page, &order);
4830
if (ret) {
4831
pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
4832
gpa);
4833
return;
4834
}
4835
4836
ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
4837
if (ret || !assigned) {
4838
pr_warn_ratelimited("SEV: Unexpected RMP fault, no assigned RMP entry found for GPA 0x%llx PFN 0x%llx error %d\n",
4839
gpa, pfn, ret);
4840
goto out_no_trace;
4841
}
4842
4843
/*
4844
* There are 2 cases where a PSMASH may be needed to resolve an #NPF
4845
* with PFERR_GUEST_RMP_BIT set:
4846
*
4847
* 1) RMPADJUST/PVALIDATE can trigger an #NPF with PFERR_GUEST_SIZEM
4848
* bit set if the guest issues them with a smaller granularity than
4849
* what is indicated by the page-size bit in the 2MB RMP entry for
4850
* the PFN that backs the GPA.
4851
*
4852
* 2) Guest access via NPT can trigger an #NPF if the NPT mapping is
4853
* smaller than what is indicated by the 2MB RMP entry for the PFN
4854
* that backs the GPA.
4855
*
4856
* In both these cases, the corresponding 2M RMP entry needs to
4857
* be PSMASH'd to 512 4K RMP entries. If the RMP entry is already
4858
* split into 4K RMP entries, then this is likely a spurious case which
4859
* can occur when there are concurrent accesses by the guest to a 2MB
4860
* GPA range that is backed by a 2MB-aligned PFN whose RMP entry is in
4861
* the process of being PSMASH'd into 4K entries. These cases should
4862
* resolve automatically on subsequent accesses, so just ignore them
4863
* here.
4864
*/
4865
if (rmp_level == PG_LEVEL_4K)
4866
goto out;
4867
4868
ret = snp_rmptable_psmash(pfn);
4869
if (ret) {
4870
/*
4871
* Look it up again. If it's 4K now then the PSMASH may have
4872
* raced with another process and the issue has already resolved
4873
* itself.
4874
*/
4875
if (!snp_lookup_rmpentry(pfn, &assigned, &rmp_level) &&
4876
assigned && rmp_level == PG_LEVEL_4K)
4877
goto out;
4878
4879
pr_warn_ratelimited("SEV: Unable to split RMP entry for GPA 0x%llx PFN 0x%llx ret %d\n",
4880
gpa, pfn, ret);
4881
}
4882
4883
kvm_zap_gfn_range(kvm, gfn, gfn + PTRS_PER_PMD);
4884
out:
4885
trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
4886
out_no_trace:
4887
kvm_release_page_unused(page);
4888
}
4889
4890
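/*
* Return true if no PFN in [start, end) is assigned (i.e. private) in the
* RMP table.
*/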
static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
4891
{
4892
kvm_pfn_t pfn = start;
4893
4894
while (pfn < end) {
4895
int ret, rmp_level;
4896
bool assigned;
4897
4898
ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
4899
if (ret) {
4900
pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
4901
pfn, start, end, rmp_level, ret);
4902
return false;
4903
}
4904
4905
if (assigned) {
4906
pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
4907
__func__, pfn, start, end, rmp_level);
4908
return false;
4909
}
4910
4911
pfn++;
4912
}
4913
4914
return true;
4915
}
4916
4917
static u8 max_level_for_order(int order)
4918
{
4919
if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
4920
return PG_LEVEL_2M;
4921
4922
return PG_LEVEL_4K;
4923
}
4924
4925
static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
4926
{
4927
kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
4928
4929
/*
4930
* If this is a large folio, and the entire 2M range containing the
4931
* PFN is currently shared, then the entire 2M-aligned range can be
4932
* set to private via a single 2M RMP entry.
4933
*/
4934
if (max_level_for_order(order) > PG_LEVEL_4K &&
4935
is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
4936
return true;
4937
4938
return false;
4939
}
4940
4941
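/*
* Make a gmem-backed PFN private in the RMP table before it is mapped into
* an SNP guest, using a 2M RMP entry when the backing page and surrounding
* range allow it.
*/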
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
4942
{
4943
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
4944
kvm_pfn_t pfn_aligned;
4945
gfn_t gfn_aligned;
4946
int level, rc;
4947
bool assigned;
4948
4949
if (!sev_snp_guest(kvm))
4950
return 0;
4951
4952
rc = snp_lookup_rmpentry(pfn, &assigned, &level);
4953
if (rc) {
4954
pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
4955
gfn, pfn, rc);
4956
return -ENOENT;
4957
}
4958
4959
if (assigned) {
4960
pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
4961
__func__, gfn, pfn, max_order, level);
4962
return 0;
4963
}
4964
4965
if (is_large_rmp_possible(kvm, pfn, max_order)) {
4966
level = PG_LEVEL_2M;
4967
pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
4968
gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
4969
} else {
4970
level = PG_LEVEL_4K;
4971
pfn_aligned = pfn;
4972
gfn_aligned = gfn;
4973
}
4974
4975
rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
4976
if (rc) {
4977
pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
4978
gfn, pfn, level, rc);
4979
return -EINVAL;
4980
}
4981
4982
pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
4983
__func__, gfn, pfn, pfn_aligned, max_order, level);
4984
4985
return 0;
4986
}
4987
4988
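/*
* Transition gmem-backed PFNs in [start, end) back to shared in the RMP
* table, PSMASHing large RMP entries as needed and flushing cache lines
* before the pages are freed back to the host.
*/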
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
4989
{
4990
kvm_pfn_t pfn;
4991
4992
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
4993
return;
4994
4995
pr_debug("%s: PFN start 0x%llx PFN end 0x%llx\n", __func__, start, end);
4996
4997
for (pfn = start; pfn < end;) {
4998
bool use_2m_update = false;
4999
int rc, rmp_level;
5000
bool assigned;
5001
5002
rc = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
5003
if (rc || !assigned)
5004
goto next_pfn;
5005
5006
use_2m_update = IS_ALIGNED(pfn, PTRS_PER_PMD) &&
5007
end >= (pfn + PTRS_PER_PMD) &&
5008
rmp_level > PG_LEVEL_4K;
5009
5010
/*
5011
* If an unaligned PFN corresponds to a 2M region assigned as a
5012
* large page in the RMP table, PSMASH the region into individual
5013
* 4K RMP entries before attempting to convert a 4K sub-page.
5014
*/
5015
if (!use_2m_update && rmp_level > PG_LEVEL_4K) {
5016
/*
5017
* This shouldn't fail, but if it does, report it and
5018
* still try to update the RMP entry to shared, in the hope
5019
* that this was a spurious error that can be addressed later.
5020
*/
5021
rc = snp_rmptable_psmash(pfn);
5022
WARN_ONCE(rc, "SEV: Failed to PSMASH RMP entry for PFN 0x%llx error %d\n",
5023
pfn, rc);
5024
}
5025
5026
rc = rmp_make_shared(pfn, use_2m_update ? PG_LEVEL_2M : PG_LEVEL_4K);
5027
if (WARN_ONCE(rc, "SEV: Failed to update RMP entry for PFN 0x%llx error %d\n",
5028
pfn, rc))
5029
goto next_pfn;
5030
5031
/*
5032
* SEV-ES avoids host/guest cache coherency issues through
5033
* WBNOINVD hooks issued via MMU notifiers during run-time, and
5034
* KVM's VM destroy path at shutdown. Those MMU notifier events
5035
* don't cover gmem since there is no requirement to map pages
5036
* to a HVA in order to use them for a running guest. While the
5037
* shutdown path would still likely cover things for SNP guests,
5038
* userspace may also free gmem pages during run-time via
5039
* hole-punching operations on the guest_memfd, so flush the
5040
* cache entries for these pages before free'ing them back to
5041
* the host.
5042
*/
5043
clflush_cache_range(__va(pfn_to_hpa(pfn)),
5044
use_2m_update ? PMD_SIZE : PAGE_SIZE);
5045
next_pfn:
5046
pfn += use_2m_update ? PTRS_PER_PMD : 1;
5047
cond_resched();
5048
}
5049
}
5050
5051
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
5052
{
5053
int level, rc;
5054
bool assigned;
5055
5056
if (!sev_snp_guest(kvm))
5057
return 0;
5058
5059
rc = snp_lookup_rmpentry(pfn, &assigned, &level);
5060
if (rc || !assigned)
5061
return PG_LEVEL_4K;
5062
5063
return level;
5064
}
5065
5066
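/*
* Return a decrypted copy of the vCPU's VMSA for debugging, if the guest
* policy allows it. The copy must be released via sev_free_decrypted_vmsa().
*/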
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
5067
{
5068
struct vcpu_svm *svm = to_svm(vcpu);
5069
struct vmcb_save_area *vmsa;
5070
struct kvm_sev_info *sev;
5071
int error = 0;
5072
int ret;
5073
5074
if (!sev_es_guest(vcpu->kvm))
5075
return NULL;
5076
5077
/*
5078
* If the VMSA has not yet been encrypted, return a pointer to the
5079
* current un-encrypted VMSA.
5080
*/
5081
if (!vcpu->arch.guest_state_protected)
5082
return (struct vmcb_save_area *)svm->sev_es.vmsa;
5083
5084
sev = to_kvm_sev_info(vcpu->kvm);
5085
5086
/* Check if the SEV policy allows debugging */
5087
if (sev_snp_guest(vcpu->kvm)) {
5088
if (!(sev->policy & SNP_POLICY_DEBUG))
5089
return NULL;
5090
} else {
5091
if (sev->policy & SEV_POLICY_NODBG)
5092
return NULL;
5093
}
5094
5095
if (sev_snp_guest(vcpu->kvm)) {
5096
struct sev_data_snp_dbg dbg = {0};
5097
5098
vmsa = snp_alloc_firmware_page(__GFP_ZERO);
5099
if (!vmsa)
5100
return NULL;
5101
5102
dbg.gctx_paddr = __psp_pa(sev->snp_context);
5103
dbg.src_addr = svm->vmcb->control.vmsa_pa;
5104
dbg.dst_addr = __psp_pa(vmsa);
5105
5106
ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
5107
5108
/*
5109
* Convert the target page back to a hypervisor-owned page no matter what.
5110
* If this fails, the page can't be used, so leak it rather than
5111
* trying to use it.
5112
*/
5113
if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
5114
return NULL;
5115
5116
if (ret) {
5117
pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
5118
ret, error, error);
5119
free_page((unsigned long)vmsa);
5120
5121
return NULL;
5122
}
5123
} else {
5124
struct sev_data_dbg dbg = {0};
5125
struct page *vmsa_page;
5126
5127
vmsa_page = alloc_page(GFP_KERNEL);
5128
if (!vmsa_page)
5129
return NULL;
5130
5131
vmsa = page_address(vmsa_page);
5132
5133
dbg.handle = sev->handle;
5134
dbg.src_addr = svm->vmcb->control.vmsa_pa;
5135
dbg.dst_addr = __psp_pa(vmsa);
5136
dbg.len = PAGE_SIZE;
5137
5138
ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, &dbg, &error);
5139
if (ret) {
5140
pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
5141
ret, error, error);
5142
__free_page(vmsa_page);
5143
5144
return NULL;
5145
}
5146
}
5147
5148
return vmsa;
5149
}
5150
5151
void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
5152
{
5153
/* If the VMSA has not yet been encrypted, nothing was allocated */
5154
if (!vcpu->arch.guest_state_protected || !vmsa)
5155
return;
5156
5157
free_page((unsigned long)vmsa);
5158
}
5159
5160