GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_sbi_fwft.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2025 Rivos Inc.
 *
 * Authors:
 *     Clément Léger <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_sbi_fwft.h>

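/*
 * KVM handling of the SBI Firmware Features (FWFT) extension.
 *
 * A guest uses SBI_EXT_FWFT_SET/GET to configure per-VCPU firmware features
 * such as misaligned exception delegation and the pointer masking tag length
 * (PMLEN).  Per-feature state is also exposed to host userspace through
 * ONE_REG registers so that it can be saved and restored.
 */

/* Misaligned load/store exception bits delegated to the guest via hedeleg. */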
#define MIS_DELEG (BIT_ULL(EXC_LOAD_MISALIGNED) | BIT_ULL(EXC_STORE_MISALIGNED))

struct kvm_sbi_fwft_feature {
        /**
         * @id: Feature ID
         */
        enum sbi_fwft_feature_t id;

        /**
         * @first_reg_num: ONE_REG index of the feature's first register
         */
        unsigned long first_reg_num;

        /**
         * @supported: Check if the feature is supported on the vcpu
         *
         * This callback is optional; if not provided, the feature is assumed
         * to be supported.
         */
        bool (*supported)(struct kvm_vcpu *vcpu);

        /**
         * @reset: Reset the feature value irrespective of whether the feature
         * is supported or not
         *
         * This callback is mandatory
         */
        void (*reset)(struct kvm_vcpu *vcpu);

        /**
         * @set: Set the feature value
         *
         * Return SBI_SUCCESS on success or an SBI error code (SBI_ERR_*)
         *
         * This callback is mandatory
         */
        long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
                    bool one_reg_access, unsigned long value);

        /**
         * @get: Get the feature's current value
         *
         * Return SBI_SUCCESS on success or an SBI error code (SBI_ERR_*)
         *
         * This callback is mandatory
         */
        long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
                    bool one_reg_access, unsigned long *value);
};

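/*
 * Feature IDs defined by the SBI FWFT extension.  A request for an ID in this
 * list that KVM does not implement fails with SBI_ERR_NOT_SUPPORTED, whereas
 * a completely unknown ID fails with SBI_ERR_DENIED.
 */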
static const enum sbi_fwft_feature_t kvm_fwft_defined_features[] = {
        SBI_FWFT_MISALIGNED_EXC_DELEG,
        SBI_FWFT_LANDING_PAD,
        SBI_FWFT_SHADOW_STACK,
        SBI_FWFT_DOUBLE_TRAP,
        SBI_FWFT_PTE_AD_HW_UPDATING,
        SBI_FWFT_POINTER_MASKING_PMLEN,
};

static bool kvm_fwft_is_defined_feature(enum sbi_fwft_feature_t feature)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(kvm_fwft_defined_features); i++) {
                if (kvm_fwft_defined_features[i] == feature)
                        return true;
        }

        return false;
}

static bool kvm_sbi_fwft_misaligned_delegation_supported(struct kvm_vcpu *vcpu)
{
        return misaligned_traps_can_delegate();
}

static void kvm_sbi_fwft_reset_misaligned_delegation(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

        cfg->hedeleg &= ~MIS_DELEG;
}

static long kvm_sbi_fwft_set_misaligned_delegation(struct kvm_vcpu *vcpu,
                                                   struct kvm_sbi_fwft_config *conf,
                                                   bool one_reg_access, unsigned long value)
{
        struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

        if (value == 1) {
                cfg->hedeleg |= MIS_DELEG;
                if (!one_reg_access)
                        csr_set(CSR_HEDELEG, MIS_DELEG);
        } else if (value == 0) {
                cfg->hedeleg &= ~MIS_DELEG;
                if (!one_reg_access)
                        csr_clear(CSR_HEDELEG, MIS_DELEG);
        } else {
                return SBI_ERR_INVALID_PARAM;
        }

        return SBI_SUCCESS;
}

static long kvm_sbi_fwft_get_misaligned_delegation(struct kvm_vcpu *vcpu,
                                                   struct kvm_sbi_fwft_config *conf,
                                                   bool one_reg_access, unsigned long *value)
{
        struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

        *value = (cfg->hedeleg & MIS_DELEG) == MIS_DELEG;
        return SBI_SUCCESS;
}

#ifndef CONFIG_32BIT

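/*
 * Probe one PMM encoding: write it into henvcfg and check whether it reads
 * back unchanged (encodings the hardware does not implement will not stick),
 * then clear the PMM field again via csr_read_clear().
 */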
static bool try_to_set_pmm(unsigned long value)
{
        csr_set(CSR_HENVCFG, value);
        return (csr_read_clear(CSR_HENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}

static bool kvm_sbi_fwft_pointer_masking_pmlen_supported(struct kvm_vcpu *vcpu)
{
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);

        if (!riscv_isa_extension_available(vcpu->arch.isa, SMNPM))
                return false;

        fwft->have_vs_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
        fwft->have_vs_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);

        return fwft->have_vs_pmlen_7 || fwft->have_vs_pmlen_16;
}

static void kvm_sbi_fwft_reset_pointer_masking_pmlen(struct kvm_vcpu *vcpu)
{
        vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM;
}

static long kvm_sbi_fwft_set_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
                                                   struct kvm_sbi_fwft_config *conf,
                                                   bool one_reg_access, unsigned long value)
{
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
        unsigned long pmm;

        switch (value) {
        case 0:
                pmm = ENVCFG_PMM_PMLEN_0;
                break;
        case 7:
                if (!fwft->have_vs_pmlen_7)
                        return SBI_ERR_INVALID_PARAM;
                pmm = ENVCFG_PMM_PMLEN_7;
                break;
        case 16:
                if (!fwft->have_vs_pmlen_16)
                        return SBI_ERR_INVALID_PARAM;
                pmm = ENVCFG_PMM_PMLEN_16;
                break;
        default:
                return SBI_ERR_INVALID_PARAM;
        }

        vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM;
        vcpu->arch.cfg.henvcfg |= pmm;

        /*
         * Instead of waiting for vcpu_load/put() to update the HENVCFG CSR,
         * update it here so that the VCPU sees the pointer masking mode
         * change immediately.
         */
        if (!one_reg_access)
                csr_write(CSR_HENVCFG, vcpu->arch.cfg.henvcfg);

        return SBI_SUCCESS;
}

static long kvm_sbi_fwft_get_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
                                                   struct kvm_sbi_fwft_config *conf,
                                                   bool one_reg_access, unsigned long *value)
{
        switch (vcpu->arch.cfg.henvcfg & ENVCFG_PMM) {
        case ENVCFG_PMM_PMLEN_0:
                *value = 0;
                break;
        case ENVCFG_PMM_PMLEN_7:
                *value = 7;
                break;
        case ENVCFG_PMM_PMLEN_16:
                *value = 16;
                break;
        default:
                return SBI_ERR_FAILURE;
        }

        return SBI_SUCCESS;
}

#endif

static const struct kvm_sbi_fwft_feature features[] = {
        {
                .id = SBI_FWFT_MISALIGNED_EXC_DELEG,
                .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, misaligned_deleg.enable) /
                                 sizeof(unsigned long),
                .supported = kvm_sbi_fwft_misaligned_delegation_supported,
                .reset = kvm_sbi_fwft_reset_misaligned_delegation,
                .set = kvm_sbi_fwft_set_misaligned_delegation,
                .get = kvm_sbi_fwft_get_misaligned_delegation,
        },
#ifndef CONFIG_32BIT
        {
                .id = SBI_FWFT_POINTER_MASKING_PMLEN,
                .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, pointer_masking.enable) /
                                 sizeof(unsigned long),
                .supported = kvm_sbi_fwft_pointer_masking_pmlen_supported,
                .reset = kvm_sbi_fwft_reset_pointer_masking_pmlen,
                .set = kvm_sbi_fwft_set_pointer_masking_pmlen,
                .get = kvm_sbi_fwft_get_pointer_masking_pmlen,
        },
#endif
};

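/*
 * Each feature occupies three consecutive ONE_REG slots starting at
 * first_reg_num: the enable flag, the set-call flags and the feature value
 * itself (see kvm_sbi_ext_fwft_get_reg() and kvm_sbi_ext_fwft_set_reg()).
 */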
static const struct kvm_sbi_fwft_feature *kvm_sbi_fwft_regnum_to_feature(unsigned long reg_num)
{
        const struct kvm_sbi_fwft_feature *feature;
        int i;

        for (i = 0; i < ARRAY_SIZE(features); i++) {
                feature = &features[i];
                if (feature->first_reg_num <= reg_num && reg_num < (feature->first_reg_num + 3))
                        return feature;
        }

        return NULL;
}

static struct kvm_sbi_fwft_config *
kvm_sbi_fwft_get_config(struct kvm_vcpu *vcpu, enum sbi_fwft_feature_t feature)
{
        int i;
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);

        for (i = 0; i < ARRAY_SIZE(features); i++) {
                if (fwft->configs[i].feature->id == feature)
                        return &fwft->configs[i];
        }

        return NULL;
}

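/*
 * Look up the per-VCPU config for @feature and check that it is both
 * supported on the host and currently enabled (userspace can disable a
 * feature through its enable ONE_REG register).
 */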
static int kvm_fwft_get_feature(struct kvm_vcpu *vcpu, u32 feature,
                                struct kvm_sbi_fwft_config **conf)
{
        struct kvm_sbi_fwft_config *tconf;

        tconf = kvm_sbi_fwft_get_config(vcpu, feature);
        if (!tconf) {
                if (kvm_fwft_is_defined_feature(feature))
                        return SBI_ERR_NOT_SUPPORTED;

                return SBI_ERR_DENIED;
        }

        if (!tconf->supported || !tconf->enabled)
                return SBI_ERR_NOT_SUPPORTED;

        *conf = tconf;

        return SBI_SUCCESS;
}

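/*
 * SBI_FWFT_SET_FLAG_LOCK is the only flag accepted.  Once a feature has been
 * set with the lock flag, further sets fail with SBI_ERR_DENIED_LOCKED until
 * the VCPU is reset, which clears the flags again.
 */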
static int kvm_sbi_fwft_set(struct kvm_vcpu *vcpu, u32 feature,
                            unsigned long value, unsigned long flags)
{
        int ret;
        struct kvm_sbi_fwft_config *conf;

        ret = kvm_fwft_get_feature(vcpu, feature, &conf);
        if (ret)
                return ret;

        if ((flags & ~SBI_FWFT_SET_FLAG_LOCK) != 0)
                return SBI_ERR_INVALID_PARAM;

        if (conf->flags & SBI_FWFT_SET_FLAG_LOCK)
                return SBI_ERR_DENIED_LOCKED;

        conf->flags = flags;

        return conf->feature->set(vcpu, conf, false, value);
}

static int kvm_sbi_fwft_get(struct kvm_vcpu *vcpu, unsigned long feature,
                            unsigned long *value)
{
        int ret;
        struct kvm_sbi_fwft_config *conf;

        ret = kvm_fwft_get_feature(vcpu, feature, &conf);
        if (ret)
                return ret;

        return conf->feature->get(vcpu, conf, false, value);
}

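/*
 * Entry point for guest SBI calls into the FWFT extension.  Per the SBI
 * calling convention the function ID arrives in a6 and the arguments in
 * a0-a2, e.g. SBI_EXT_FWFT_SET takes (feature, value, flags).
 */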
static int kvm_sbi_ext_fwft_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                    struct kvm_vcpu_sbi_return *retdata)
{
        int ret;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long funcid = cp->a6;

        switch (funcid) {
        case SBI_EXT_FWFT_SET:
                ret = kvm_sbi_fwft_set(vcpu, cp->a0, cp->a1, cp->a2);
                break;
        case SBI_EXT_FWFT_GET:
                ret = kvm_sbi_fwft_get(vcpu, cp->a0, &retdata->out_val);
                break;
        default:
                ret = SBI_ERR_NOT_SUPPORTED;
                break;
        }

        retdata->err_val = ret;

        return 0;
}

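/*
 * Allocate the per-VCPU config array mirroring features[] and probe, for
 * each feature, whether the host can support it.  Supported features start
 * out enabled.
 */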
static int kvm_sbi_ext_fwft_init(struct kvm_vcpu *vcpu)
{
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
        const struct kvm_sbi_fwft_feature *feature;
        struct kvm_sbi_fwft_config *conf;
        int i;

        fwft->configs = kcalloc(ARRAY_SIZE(features), sizeof(struct kvm_sbi_fwft_config),
                                GFP_KERNEL);
        if (!fwft->configs)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(features); i++) {
                feature = &features[i];
                conf = &fwft->configs[i];
                if (feature->supported)
                        conf->supported = feature->supported(vcpu);
                else
                        conf->supported = true;

                conf->enabled = conf->supported;
                conf->feature = feature;
        }

        return 0;
}

static void kvm_sbi_ext_fwft_deinit(struct kvm_vcpu *vcpu)
{
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);

        kfree(fwft->configs);
}

static void kvm_sbi_ext_fwft_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
        const struct kvm_sbi_fwft_feature *feature;
        int i;

        for (i = 0; i < ARRAY_SIZE(features); i++) {
                fwft->configs[i].flags = 0;
                feature = &features[i];
                if (feature->reset)
                        feature->reset(vcpu);
        }
}

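/*
 * ONE_REG enumeration helpers: walk every possible FWFT register index and
 * report only registers that belong to a feature supported on this VCPU, so
 * that userspace only sees registers with meaningful state.
 */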
static unsigned long kvm_sbi_ext_fwft_get_reg_count(struct kvm_vcpu *vcpu)
{
        unsigned long max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
        const struct kvm_sbi_fwft_feature *feature;
        struct kvm_sbi_fwft_config *conf;
        unsigned long reg, ret = 0;

        for (reg = 0; reg < max_reg_count; reg++) {
                feature = kvm_sbi_fwft_regnum_to_feature(reg);
                if (!feature)
                        continue;

                conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
                if (!conf || !conf->supported)
                        continue;

                ret++;
        }

        return ret;
}

static int kvm_sbi_ext_fwft_get_reg_id(struct kvm_vcpu *vcpu, int index, u64 *reg_id)
{
        int reg, max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
        const struct kvm_sbi_fwft_feature *feature;
        struct kvm_sbi_fwft_config *conf;
        int idx = 0;

        for (reg = 0; reg < max_reg_count; reg++) {
                feature = kvm_sbi_fwft_regnum_to_feature(reg);
                if (!feature)
                        continue;

                conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
                if (!conf || !conf->supported)
                        continue;

                if (index == idx) {
                        *reg_id = KVM_REG_RISCV |
                                  (IS_ENABLED(CONFIG_32BIT) ?
                                   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
                                  KVM_REG_RISCV_SBI_STATE |
                                  KVM_REG_RISCV_SBI_FWFT | reg;
                        return 0;
                }

                idx++;
        }

        return -ENOENT;
}

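/*
 * ONE_REG accessors: within a feature, register offset 0 is the enable flag,
 * offset 1 the flags word and offset 2 the feature value.  The value is
 * routed through the feature's get()/set() callback with one_reg_access set,
 * so the set callbacks only update the saved VCPU configuration and skip
 * live CSR writes.
 */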
static int kvm_sbi_ext_fwft_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
                                    unsigned long reg_size, void *reg_val)
{
        const struct kvm_sbi_fwft_feature *feature;
        struct kvm_sbi_fwft_config *conf;
        unsigned long *value;
        int ret = 0;

        if (reg_size != sizeof(unsigned long))
                return -EINVAL;
        value = reg_val;

        feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
        if (!feature)
                return -ENOENT;

        conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
        if (!conf || !conf->supported)
                return -ENOENT;

        switch (reg_num - feature->first_reg_num) {
        case 0:
                *value = conf->enabled;
                break;
        case 1:
                *value = conf->flags;
                break;
        case 2:
                ret = conf->feature->get(vcpu, conf, true, value);
                break;
        default:
                return -ENOENT;
        }

        return sbi_err_map_linux_errno(ret);
}

static int kvm_sbi_ext_fwft_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
                                    unsigned long reg_size, const void *reg_val)
{
        const struct kvm_sbi_fwft_feature *feature;
        struct kvm_sbi_fwft_config *conf;
        unsigned long value;
        int ret = 0;

        if (reg_size != sizeof(unsigned long))
                return -EINVAL;
        value = *(const unsigned long *)reg_val;

        feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
        if (!feature)
                return -ENOENT;

        conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
        if (!conf || !conf->supported)
                return -ENOENT;

        switch (reg_num - feature->first_reg_num) {
        case 0:
                switch (value) {
                case 0:
                        conf->enabled = false;
                        break;
                case 1:
                        conf->enabled = true;
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case 1:
                conf->flags = value & SBI_FWFT_SET_FLAG_LOCK;
                break;
        case 2:
                ret = conf->feature->set(vcpu, conf, true, value);
                break;
        default:
                return -ENOENT;
        }

        return sbi_err_map_linux_errno(ret);
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft = {
        .extid_start = SBI_EXT_FWFT,
        .extid_end = SBI_EXT_FWFT,
        .handler = kvm_sbi_ext_fwft_handler,
        .init = kvm_sbi_ext_fwft_init,
        .deinit = kvm_sbi_ext_fwft_deinit,
        .reset = kvm_sbi_ext_fwft_reset,
        .state_reg_subtype = KVM_REG_RISCV_SBI_FWFT,
        .get_state_reg_count = kvm_sbi_ext_fwft_get_reg_count,
        .get_state_reg_id = kvm_sbi_ext_fwft_get_reg_id,
        .get_state_reg = kvm_sbi_ext_fwft_get_reg,
        .set_state_reg = kvm_sbi_ext_fwft_set_reg,
};