GitHub Repository: torvalds/linux
Path: blob/master/arch/riscv/kvm/vcpu_sbi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

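/*
 * When SBI v0.1 or SBI PMU support is compiled out, install NULL-handler
 * placeholders with an unreachable extension-ID range (-1UL) so that the
 * sbi_ext[] table below can be built the same way either way.
 */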
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

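/*
 * Registry of all SBI extensions known to KVM.  ext_idx is the slot in
 * scontext->ext_status[] used to track per-vCPU availability, while
 * ext_ptr supplies the extension-ID range and handler.
 */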
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
		.ext_ptr = &vcpu_sbi_ext_susp,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_STA,
		.ext_ptr = &vcpu_sbi_ext_sta,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_FWFT,
		.ext_ptr = &vcpu_sbi_ext_fwft,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;

	if (idx >= KVM_RISCV_SBI_EXT_MAX)
		return NULL;

	for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == idx) {
			sext = &sbi_ext[i];
			break;
		}
	}

	return sext;
}

static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

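/*
 * Forward the current SBI call to userspace via a KVM_EXIT_RISCV_SBI
 * exit.  The VMM is expected to service the call and fill
 * run->riscv_sbi.ret before the next KVM_RUN; a minimal (hypothetical)
 * userspace sketch might look like:
 *
 *	if (run->exit_reason == KVM_EXIT_RISCV_SBI)
 *		handle_sbi(run->riscv_sbi.extension_id,	// hypothetical helper
 *			   run->riscv_sbi.function_id,
 *			   run->riscv_sbi.args,
 *			   run->riscv_sbi.ret);
 *
 * ret[0] is pre-seeded with SBI_ERR_NOT_SUPPORTED so an inattentive VMM
 * fails the call rather than silently succeeding.
 */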
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}

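/*
 * Service an SBI reset/shutdown request: stop every vCPU of the VM and
 * hand the event to userspace as a KVM_EXIT_SYSTEM_EVENT carrying the
 * SBI reason code as its single data word.
 */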
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

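/*
 * Record the requested boot PC and the opaque a1 start argument, then
 * ask the vCPU to apply the reset on its next run-loop entry via
 * KVM_REQ_VCPU_RESET.
 */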
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
				      unsigned long pc, unsigned long a1)
{
	spin_lock(&vcpu->arch.reset_state.lock);
	vcpu->arch.reset_state.pc = pc;
	vcpu->arch.reset_state.a1 = a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
}

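/*
 * Apply a previously requested reset: seed a0 with the vCPU ID, load the
 * boot PC and a1 under the reset-state lock, and start the hart with
 * supervisor interrupts masked and VS-stage address translation off.
 */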
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

	cntx->a0 = vcpu->vcpu_id;

	spin_lock(&vcpu->arch.reset_state.lock);
	cntx->sepc = reset_state->pc;
	cntx->a1 = reset_state->a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	cntx->sstatus &= ~SR_SIE;
	csr->vsatp = 0;
}

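/*
 * Complete an SBI call that was forwarded to userspace: copy the
 * userspace-provided return values into a0/a1 and step sepc past the
 * trapping ecall.  Guarded so a repeated KVM_RUN does not apply the
 * result twice.
 */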
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

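/*
 * ONE_REG accessors for the KVM_REG_RISCV_SBI_EXT register space, which
 * lets userspace enable or disable SBI extensions individually (SINGLE)
 * or as BITS_PER_LONG-sized bitmaps (MULTI_EN/MULTI_DIS).
 */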
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_STATUS_ENABLED :
			KVM_RISCV_SBI_EXT_STATUS_DISABLED;

	return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_STATUS_ENABLED;

	return 0;
}

static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

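/*
 * Enumerate the ONE_REG indices for all SBI extensions this vCPU
 * supports; with a NULL uindices pointer, only the count is returned.
 */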
int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

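/*
 * Enumerate the ONE_REG indices in the KVM_REG_RISCV_SBI_STATE space,
 * i.e. the per-extension state registers exposed by enabled extensions
 * that implement get_state_reg_count().
 */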
int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long state_reg_count;
	int i, j, rc, count = 0;
	u64 reg;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (!ext->get_state_reg_count ||
		    scontext->ext_status[entry->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED)
			continue;

		state_reg_count = ext->get_state_reg_count(vcpu);
		if (!uindices)
			goto skip_put_user;

		for (j = 0; j < state_reg_count; j++) {
			if (ext->get_state_reg_id) {
				rc = ext->get_state_reg_id(vcpu, j, &reg);
				if (rc)
					return rc;
			} else {
				reg = KVM_REG_RISCV |
				      (IS_ENABLED(CONFIG_32BIT) ?
				       KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
				      KVM_REG_RISCV_SBI_STATE |
				      ext->state_reg_subtype | j;
			}

			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

skip_put_user:
		count += state_reg_count;
	}

	return count;
}

static const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext_withstate(struct kvm_vcpu *vcpu,
									    unsigned long subtype)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->get_state_reg_count &&
		    ext->state_reg_subtype == subtype &&
		    scontext->ext_status[entry->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_ENABLED)
			return ext;
	}

	return NULL;
}

int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long reg_subtype;
	void *reg_val;
	u64 data64;
	u32 data32;
	u16 data16;
	u8 data8;

	switch (KVM_REG_SIZE(reg->id)) {
	case 1:
		reg_val = &data8;
		break;
	case 2:
		reg_val = &data16;
		break;
	case 4:
		reg_val = &data32;
		break;
	case 8:
		reg_val = &data64;
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
	if (!ext || !ext->set_state_reg)
		return -EINVAL;

	return ext->set_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long reg_subtype;
	void *reg_val;
	u64 data64;
	u32 data32;
	u16 data16;
	u8 data8;
	int ret;

	switch (KVM_REG_SIZE(reg->id)) {
	case 1:
		reg_val = &data8;
		break;
	case 2:
		reg_val = &data16;
		break;
	case 4:
		reg_val = &data32;
		break;
	case 8:
		reg_val = &data64;
		break;
	default:
		return -EINVAL;
	}

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
	if (!ext || !ext->get_state_reg)
		return -EINVAL;

	ret = ext->get_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
	if (ret)
		return ret;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

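/*
 * Resolve the SBI extension ID of an ecall (a7) to its handler.  An
 * entry registered with ext_idx >= KVM_RISCV_SBI_EXT_MAX (currently the
 * base extension) skips the per-vCPU status check and therefore can
 * never be disabled.
 */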
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_STATUS_ENABLED)
				return ext;

			return NULL;
		}
	}

	return NULL;
}

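/*
 * Top-level SBI ecall dispatcher.  Return value convention: < 0 exits
 * KVM_RUN with an error, 0 exits KVM_RUN so userspace can handle the
 * call, and 1 resumes the guest.
 */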
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, exit the
	 * ioctl loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}

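/*
 * Probe and initialize every registered SBI extension for this vCPU,
 * recording each one's initial status (unavailable, disabled, or
 * enabled) in the per-vCPU context.
 */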
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[idx] = ext->default_disabled ?
					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
					KVM_RISCV_SBI_EXT_STATUS_ENABLED;

		if (ext->init && ext->init(vcpu) != 0)
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
	}
}

void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE ||
		    !ext->deinit)
			continue;

		ext->deinit(vcpu);
	}
}

void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED ||
		    !ext->reset)
			continue;

		ext->reset(vcpu);
	}
}