GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_onereg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
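
/*
 * For example, KVM_ISA_EXT_ARR(SSTC) expands to:
 *	[KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC
 */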

/* Mapping between KVM ISA Extension ID & guest ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMNPM),
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZAAMO),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZALRSC),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFBFMIN),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOP),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFBFMIN),
	KVM_ISA_EXT_ARR(ZVFBFWMA),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
{
	unsigned long host_ext;

	if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
	    kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	*guest_ext = kvm_isa_ext_arr[kvm_ext];
	switch (*guest_ext) {
	case RISCV_ISA_EXT_SMNPM:
		/*
		 * Pointer masking effective in (H)S-mode is provided by the
		 * Smnpm extension, so that extension is reported to the guest,
		 * even though the CSR bits for configuring VS-mode pointer
		 * masking on the host side are part of the Ssnpm extension.
		 */
		host_ext = RISCV_ISA_EXT_SSNPM;
		break;
	default:
		host_ext = *guest_ext;
		break;
	}

	if (!__riscv_isa_extension_available(NULL, host_ext))
		return -ENOENT;

	return 0;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on the interrupt filtering defined in Ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * The guest OS can use Svadu only when the host OS enables it.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	/* There is no architectural config bit to disable Sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZAAMO:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZALRSC:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFBFMIN:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICBOP:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFBFMIN:
	case KVM_RISCV_ISA_EXT_ZVFBFWMA:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless we support Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long guest_ext, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;
		if (kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(guest_ext, vcpu->arch.isa);
	}
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		reg_val = riscv_cbop_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
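
/*
 * Illustrative sketch (not part of the kernel): how userspace might read
 * the "isa" config register above via KVM_GET_ONE_REG on rv64 (use
 * KVM_REG_SIZE_U32 on rv32); vcpu_fd is a placeholder for an open vCPU
 * file descriptor, error handling omitted:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */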

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		if (reg_val != riscv_cbop_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
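
/*
 * Note: the guest's sip view is synthesized from the shadowed hvip CSR;
 * VS-level interrupt bits live VSIP_TO_HVIP_SHIFT positions higher in
 * hvip than their sip counterparts, hence the shifts in the getter above
 * and the setter below.
 */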

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
			sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
			sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(guest_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(guest_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}
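
/*
 * Illustrative sketch (not part of the kernel): disabling a single guest
 * ISA extension from userspace before the vCPU first runs, matching the
 * KVM_REG_RISCV_ISA_SINGLE path above (rv64 shown); vcpu_fd is a
 * placeholder:
 *
 *	unsigned long enable = 0;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
 *		      KVM_RISCV_ISA_EXT_SVPBMT,
 *		.addr = (unsigned long)&enable,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */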

static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
	     i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding
		 * extension is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(NULL, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(NULL, ZICBOZ))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
			 !riscv_isa_extension_available(NULL, ZICBOP))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
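
/*
 * Note: the copy_*_reg_indices() helpers double as counters; when
 * uindices is NULL they skip the put_user() calls and just return the
 * number of indices that would have been written.
 */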

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned long guest_ext;
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

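	/*
	 * The size field of a reg id stores log2 of the register's byte
	 * width (KVM_REG_SIZE() inverts it with a shift), so for the
	 * power-of-two vlenb below, __builtin_ctzl(vlenb) is that log2.
	 */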
	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
		      KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}
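
/*
 * Illustrative sketch (not part of the kernel): the index list built by
 * kvm_riscv_vcpu_copy_reg_indices() reaches userspace through the
 * KVM_GET_REG_LIST vcpu ioctl. A common two-call sizing pattern (assumed
 * here, error handling omitted; vcpu_fd is a placeholder):
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);  // fails with E2BIG but
 *						   // updates probe.n
 *	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);    // fills list->reg[]
 */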

int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}