1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2012,2013 - ARM Ltd
4
* Author: Marc Zyngier <[email protected]>
5
*
6
* Derived from arch/arm/kvm/coproc.c:
7
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
8
* Authors: Rusty Russell <[email protected]>
9
* Christoffer Dall <[email protected]>
10
*/
11
12
#include <linux/bitfield.h>
13
#include <linux/bsearch.h>
14
#include <linux/cacheinfo.h>
15
#include <linux/debugfs.h>
16
#include <linux/kvm_host.h>
17
#include <linux/mm.h>
18
#include <linux/printk.h>
19
#include <linux/uaccess.h>
20
#include <linux/irqchip/arm-gic-v3.h>
21
22
#include <asm/arm_pmuv3.h>
23
#include <asm/cacheflush.h>
24
#include <asm/cputype.h>
25
#include <asm/debug-monitors.h>
26
#include <asm/esr.h>
27
#include <asm/kvm_arm.h>
28
#include <asm/kvm_emulate.h>
29
#include <asm/kvm_hyp.h>
30
#include <asm/kvm_mmu.h>
31
#include <asm/kvm_nested.h>
32
#include <asm/perf_event.h>
33
#include <asm/sysreg.h>
34
35
#include <trace/events/kvm.h>
36
37
#include "sys_regs.h"
38
#include "vgic/vgic.h"
39
40
#include "trace.h"
41
42
/*
43
* For AArch32, we only take care of what is being trapped. Anything
44
* that has to do with init and userspace access has to go via the
45
* 64bit interface.
46
*/
47
48
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50
u64 val);
51
52
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53
const struct sys_reg_desc *r)
54
{
55
kvm_inject_undefined(vcpu);
56
return false;
57
}
58
59
static bool bad_trap(struct kvm_vcpu *vcpu,
60
struct sys_reg_params *params,
61
const struct sys_reg_desc *r,
62
const char *msg)
63
{
64
WARN_ONCE(1, "Unexpected %s\n", msg);
65
print_sys_reg_instr(params);
66
return undef_access(vcpu, params, r);
67
}
68
69
static bool read_from_write_only(struct kvm_vcpu *vcpu,
70
struct sys_reg_params *params,
71
const struct sys_reg_desc *r)
72
{
73
return bad_trap(vcpu, params, r,
74
"sys_reg read to write-only register");
75
}
76
77
static bool write_to_read_only(struct kvm_vcpu *vcpu,
78
struct sys_reg_params *params,
79
const struct sys_reg_desc *r)
80
{
81
return bad_trap(vcpu, params, r,
82
"sys_reg write to read-only register");
83
}
84
85
enum sr_loc_attr {
86
SR_LOC_MEMORY = 0, /* Register definitely in memory */
87
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
88
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
89
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
90
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
91
};
92
93
struct sr_loc {
94
enum sr_loc_attr loc;
95
enum vcpu_sysreg map_reg;
96
u64 (*xlate)(u64);
97
};
98
99
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100
enum vcpu_sysreg reg)
101
{
102
switch (reg) {
103
case SCTLR_EL1:
104
case CPACR_EL1:
105
case TTBR0_EL1:
106
case TTBR1_EL1:
107
case TCR_EL1:
108
case TCR2_EL1:
109
case PIR_EL1:
110
case PIRE0_EL1:
111
case POR_EL1:
112
case ESR_EL1:
113
case AFSR0_EL1:
114
case AFSR1_EL1:
115
case FAR_EL1:
116
case MAIR_EL1:
117
case VBAR_EL1:
118
case CONTEXTIDR_EL1:
119
case AMAIR_EL1:
120
case CNTKCTL_EL1:
121
case ELR_EL1:
122
case SPSR_EL1:
123
case ZCR_EL1:
124
case SCTLR2_EL1:
125
/*
126
* EL1 registers which have an ELx2 mapping are loaded if
127
* we're not in hypervisor context.
128
*/
129
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130
131
case TPIDR_EL0:
132
case TPIDRRO_EL0:
133
case TPIDR_EL1:
134
case PAR_EL1:
135
case DACR32_EL2:
136
case IFSR32_EL2:
137
case DBGVCR32_EL2:
138
/* These registers are always loaded, no matter what */
139
return SR_LOC_LOADED;
140
141
default:
142
/* Non-mapped EL2 registers are by definition in memory. */
143
return SR_LOC_MEMORY;
144
}
145
}
146
147
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148
enum vcpu_sysreg reg,
149
enum vcpu_sysreg map_reg,
150
u64 (*xlate)(u64),
151
struct sr_loc *loc)
152
{
153
if (!is_hyp_ctxt(vcpu)) {
154
loc->loc = SR_LOC_MEMORY;
155
return;
156
}
157
158
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159
loc->map_reg = map_reg;
160
161
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162
163
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164
loc->loc |= SR_LOC_XLATED;
165
loc->xlate = xlate;
166
}
167
}
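/*
 * For example, with the sysregs loaded on the CPU (SYSREGS_ON_CPU set), a
 * vEL2 guest running with HCR_EL2.E2H == 0 that accesses SCTLR_EL2 resolves
 * to loc == SR_LOC_LOADED | SR_LOC_MAPPED | SR_LOC_XLATED, with
 * map_reg == SCTLR_EL1 and xlate == translate_sctlr_el2_to_sctlr_el1.
 */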
168
169
#define MAPPED_EL2_SYSREG(r, m, t) \
170
case r: { \
171
locate_mapped_el2_register(vcpu, r, m, t, loc); \
172
break; \
173
}
174
175
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176
struct sr_loc *loc)
177
{
178
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179
loc->loc = SR_LOC_MEMORY;
180
return;
181
}
182
183
switch (reg) {
184
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
185
translate_sctlr_el2_to_sctlr_el1 );
186
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
187
translate_cptr_el2_to_cpacr_el1 );
188
MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
189
translate_ttbr0_el2_to_ttbr0_el1 );
190
MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
191
MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
192
translate_tcr_el2_to_tcr_el1 );
193
MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
194
MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
195
MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
196
MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
197
MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
198
MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
199
MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
200
MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
201
MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
202
MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
203
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
204
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
205
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
206
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
207
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
208
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
209
case CNTHCTL_EL2:
210
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
211
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
212
SR_LOC_SPECIAL : SR_LOC_MEMORY);
213
break;
214
default:
215
loc->loc = locate_direct_register(vcpu, reg);
216
}
217
}
218
219
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
220
{
221
u64 val = 0x8badf00d8badf00d;
222
223
switch (reg) {
224
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
225
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
226
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
227
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
228
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
229
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
230
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
231
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
232
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
233
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
234
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
235
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
236
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
237
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
238
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
239
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
240
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
241
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
242
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
243
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
244
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
245
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
246
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
247
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
248
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
249
case PAR_EL1: val = read_sysreg_par(); break;
250
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
251
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
252
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
253
default: WARN_ON_ONCE(1);
254
}
255
256
return val;
257
}
258
259
static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
260
{
261
switch (reg) {
262
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
263
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
264
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
265
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
266
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
267
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
268
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
269
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
270
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
271
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
272
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
273
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
274
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
275
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
276
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
277
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
278
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
279
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
280
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
281
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
282
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
283
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
284
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
285
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
286
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
287
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
288
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
289
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
290
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
291
default: WARN_ON_ONCE(1);
292
}
293
}
294
295
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
296
{
297
struct sr_loc loc = {};
298
299
locate_register(vcpu, reg, &loc);
300
301
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
302
303
if (loc.loc & SR_LOC_SPECIAL) {
304
u64 val;
305
306
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
307
308
/*
309
* CNTHCTL_EL2 requires some special treatment to account
310
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
311
*/
312
switch (reg) {
313
case CNTHCTL_EL2:
314
val = read_sysreg_el1(SYS_CNTKCTL);
315
val &= CNTKCTL_VALID_BITS;
316
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
317
return val;
318
default:
319
WARN_ON_ONCE(1);
320
}
321
}
322
323
if (loc.loc & SR_LOC_LOADED) {
324
enum vcpu_sysreg map_reg = reg;
325
326
if (loc.loc & SR_LOC_MAPPED)
327
map_reg = loc.map_reg;
328
329
if (!(loc.loc & SR_LOC_XLATED)) {
330
u64 val = read_sr_from_cpu(map_reg);
331
332
if (reg >= __SANITISED_REG_START__)
333
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
334
335
return val;
336
}
337
}
338
339
return __vcpu_sys_reg(vcpu, reg);
340
}
341
342
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
343
{
344
struct sr_loc loc = {};
345
346
locate_register(vcpu, reg, &loc);
347
348
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
349
350
if (loc.loc & SR_LOC_SPECIAL) {
351
352
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
353
354
switch (reg) {
355
case CNTHCTL_EL2:
356
/*
357
* If E2H=1, some of the bits are backed by
358
* CNTKCTL_EL1, while the rest is kept in memory.
359
* Yes, this is fun stuff.
360
*/
361
write_sysreg_el1(val, SYS_CNTKCTL);
362
break;
363
default:
364
WARN_ON_ONCE(1);
365
}
366
}
367
368
if (loc.loc & SR_LOC_LOADED) {
369
enum vcpu_sysreg map_reg = reg;
370
u64 xlated_val;
371
372
if (reg >= __SANITISED_REG_START__)
373
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
374
375
if (loc.loc & SR_LOC_MAPPED)
376
map_reg = loc.map_reg;
377
378
if (loc.loc & SR_LOC_XLATED)
379
xlated_val = loc.xlate(val);
380
else
381
xlated_val = val;
382
383
write_sr_to_cpu(map_reg, xlated_val);
384
385
/*
386
* Fall through to write the backing store anyway, which
387
* allows translated registers to be directly read without a
388
* reverse translation.
389
*/
390
}
391
392
__vcpu_assign_sys_reg(vcpu, reg, val);
393
}
394
395
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
396
#define CSSELR_MAX 14
397
398
/*
399
* Returns the minimum line size for the selected cache, expressed as
400
* Log2(bytes).
401
*/
402
static u8 get_min_cache_line_size(bool icache)
403
{
404
u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
405
u8 field;
406
407
if (icache)
408
field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
409
else
410
field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
411
412
/*
413
* Cache line size is represented as Log2(words) in CTR_EL0.
414
* Log2(bytes) can be derived with the following:
415
*
416
* Log2(words) + 2 = Log2(bytes / 4) + 2
417
* = Log2(bytes) - 2 + 2
418
* = Log2(bytes)
419
*/
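/*
 * For example, a DminLine value of 4 describes 2^4 = 16 words, i.e. 64
 * bytes, and 4 + 2 = 6 == Log2(64).
 */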
420
return field + 2;
421
}
422
423
/* Which cache CCSIDR represents depends on CSSELR value. */
424
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
425
{
426
u8 line_size;
427
428
if (vcpu->arch.ccsidr)
429
return vcpu->arch.ccsidr[csselr];
430
431
line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
432
433
/*
434
* Fabricate a CCSIDR value as the overriding value does not exist.
435
* The real CCSIDR value will not be used as it can vary by the
436
* physical CPU which the vcpu currently resides in.
437
*
438
* The line size is determined with get_min_cache_line_size(), which
439
* should be valid for all CPUs even if they have different cache
440
* configuration.
441
*
442
* The associativity bits are cleared, meaning the geometry of all data
443
* and unified caches (which are guaranteed to be PIPT and thus
444
* non-aliasing) are 1 set and 1 way.
445
* Guests should not be doing cache operations by set/way at all, and
446
* for this reason, we trap them and attempt to infer the intent, so
447
* that we can flush the entire guest's address space at the appropriate
448
* time. The exposed geometry minimizes the number of traps.
449
* [If guests should attempt to infer aliasing properties from the
450
* geometry (which is not permitted by the architecture), they would
451
* only do so for virtually indexed caches.]
452
*
453
* We don't check if the cache level exists as it is allowed to return
454
* an UNKNOWN value if not.
455
*/
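/*
 * CCSIDR_EL1.LineSize encodes Log2(bytes) - 4, so a 64-byte line
 * (line_size == 6) is reported as LineSize == 2.
 */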
456
return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
457
}
458
459
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
460
{
461
u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
462
u32 *ccsidr = vcpu->arch.ccsidr;
463
u32 i;
464
465
if ((val & CCSIDR_EL1_RES0) ||
466
line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
467
return -EINVAL;
468
469
if (!ccsidr) {
470
if (val == get_ccsidr(vcpu, csselr))
471
return 0;
472
473
ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
474
if (!ccsidr)
475
return -ENOMEM;
476
477
for (i = 0; i < CSSELR_MAX; i++)
478
ccsidr[i] = get_ccsidr(vcpu, i);
479
480
vcpu->arch.ccsidr = ccsidr;
481
}
482
483
ccsidr[csselr] = val;
484
485
return 0;
486
}
487
488
static bool access_rw(struct kvm_vcpu *vcpu,
489
struct sys_reg_params *p,
490
const struct sys_reg_desc *r)
491
{
492
if (p->is_write)
493
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
494
else
495
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
496
497
return true;
498
}
499
500
/*
501
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
502
*/
503
static bool access_dcsw(struct kvm_vcpu *vcpu,
504
struct sys_reg_params *p,
505
const struct sys_reg_desc *r)
506
{
507
if (!p->is_write)
508
return read_from_write_only(vcpu, p, r);
509
510
/*
511
* Only track S/W ops if we don't have FWB. It still indicates
512
* that the guest is a bit broken (S/W operations should only
513
* be done by firmware, knowing that there is only a single
514
* CPU left in the system, and certainly not from non-secure
515
* software).
516
*/
517
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
518
kvm_set_way_flush(vcpu);
519
520
return true;
521
}
522
523
static bool access_dcgsw(struct kvm_vcpu *vcpu,
524
struct sys_reg_params *p,
525
const struct sys_reg_desc *r)
526
{
527
if (!kvm_has_mte(vcpu->kvm))
528
return undef_access(vcpu, p, r);
529
530
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
531
return access_dcsw(vcpu, p, r);
532
}
533
534
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
535
{
536
switch (r->aarch32_map) {
537
case AA32_LO:
538
*mask = GENMASK_ULL(31, 0);
539
*shift = 0;
540
break;
541
case AA32_HI:
542
*mask = GENMASK_ULL(63, 32);
543
*shift = 32;
544
break;
545
default:
546
*mask = GENMASK_ULL(63, 0);
547
*shift = 0;
548
break;
549
}
550
}
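/*
 * For instance, callers passing an AA32_HI-mapped register get
 * mask == GENMASK_ULL(63, 32) and shift == 32, so only bits [63:32] of the
 * 64-bit shadow register are exposed through the 32-bit accessor.
 */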
551
552
/*
553
* Generic accessor for VM registers. Only called as long as HCR_TVM
554
* is set. If the guest enables the MMU, we stop trapping the VM
555
* sys_regs and leave it in complete control of the caches.
556
*/
557
static bool access_vm_reg(struct kvm_vcpu *vcpu,
558
struct sys_reg_params *p,
559
const struct sys_reg_desc *r)
560
{
561
bool was_enabled = vcpu_has_cache_enabled(vcpu);
562
u64 val, mask, shift;
563
564
BUG_ON(!p->is_write);
565
566
get_access_mask(r, &mask, &shift);
567
568
if (~mask) {
569
val = vcpu_read_sys_reg(vcpu, r->reg);
570
val &= ~mask;
571
} else {
572
val = 0;
573
}
574
575
val |= (p->regval & (mask >> shift)) << shift;
576
vcpu_write_sys_reg(vcpu, val, r->reg);
577
578
kvm_toggle_cache(vcpu, was_enabled);
579
return true;
580
}
581
582
static bool access_actlr(struct kvm_vcpu *vcpu,
583
struct sys_reg_params *p,
584
const struct sys_reg_desc *r)
585
{
586
u64 mask, shift;
587
588
if (p->is_write)
589
return ignore_write(vcpu, p);
590
591
get_access_mask(r, &mask, &shift);
592
p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
593
594
return true;
595
}
596
597
/*
598
* Trap handler for the GICv3 SGI generation system register.
599
* Forward the request to the VGIC emulation.
600
* The cp15_64 code makes sure this automatically works
601
* for both AArch64 and AArch32 accesses.
602
*/
603
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
604
struct sys_reg_params *p,
605
const struct sys_reg_desc *r)
606
{
607
bool g1;
608
609
if (!kvm_has_gicv3(vcpu->kvm))
610
return undef_access(vcpu, p, r);
611
612
if (!p->is_write)
613
return read_from_write_only(vcpu, p, r);
614
615
/*
616
* In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
617
* Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
618
* depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
619
* equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
620
* group.
621
*/
622
if (p->Op0 == 0) { /* AArch32 */
623
switch (p->Op1) {
624
default: /* Keep GCC quiet */
625
case 0: /* ICC_SGI1R */
626
g1 = true;
627
break;
628
case 1: /* ICC_ASGI1R */
629
case 2: /* ICC_SGI0R */
630
g1 = false;
631
break;
632
}
633
} else { /* AArch64 */
634
switch (p->Op2) {
635
default: /* Keep GCC quiet */
636
case 5: /* ICC_SGI1R_EL1 */
637
g1 = true;
638
break;
639
case 6: /* ICC_ASGI1R_EL1 */
640
case 7: /* ICC_SGI0R_EL1 */
641
g1 = false;
642
break;
643
}
644
}
645
646
vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
647
648
return true;
649
}
650
651
static bool access_gic_sre(struct kvm_vcpu *vcpu,
652
struct sys_reg_params *p,
653
const struct sys_reg_desc *r)
654
{
655
if (!kvm_has_gicv3(vcpu->kvm))
656
return undef_access(vcpu, p, r);
657
658
if (p->is_write)
659
return ignore_write(vcpu, p);
660
661
if (p->Op1 == 4) { /* ICC_SRE_EL2 */
662
p->regval = KVM_ICC_SRE_EL2;
663
} else { /* ICC_SRE_EL1 */
664
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
665
}
666
667
return true;
668
}
669
670
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
671
struct sys_reg_params *p,
672
const struct sys_reg_desc *r)
673
{
674
if (p->is_write)
675
return ignore_write(vcpu, p);
676
else
677
return read_zero(vcpu, p);
678
}
679
680
/*
681
* ARMv8.1 mandates at least a trivial LORegion implementation, where all the
682
* RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
683
* system, these registers should UNDEF. LORID_EL1 being a RO register, we
684
* treat it separately.
685
*/
686
static bool trap_loregion(struct kvm_vcpu *vcpu,
687
struct sys_reg_params *p,
688
const struct sys_reg_desc *r)
689
{
690
u32 sr = reg_to_encoding(r);
691
692
if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
693
return undef_access(vcpu, p, r);
694
695
if (p->is_write && sr == SYS_LORID_EL1)
696
return write_to_read_only(vcpu, p, r);
697
698
return trap_raz_wi(vcpu, p, r);
699
}
700
701
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
702
struct sys_reg_params *p,
703
const struct sys_reg_desc *r)
704
{
705
if (!p->is_write)
706
return read_from_write_only(vcpu, p, r);
707
708
kvm_debug_handle_oslar(vcpu, p->regval);
709
return true;
710
}
711
712
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
713
struct sys_reg_params *p,
714
const struct sys_reg_desc *r)
715
{
716
if (p->is_write)
717
return write_to_read_only(vcpu, p, r);
718
719
p->regval = __vcpu_sys_reg(vcpu, r->reg);
720
return true;
721
}
722
723
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
724
u64 val)
725
{
726
/*
727
* The only modifiable bit is the OSLK bit. Refuse the write if
728
* userspace attempts to change any other bit in the register.
729
*/
730
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
731
return -EINVAL;
732
733
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
734
return 0;
735
}
736
737
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
738
struct sys_reg_params *p,
739
const struct sys_reg_desc *r)
740
{
741
if (p->is_write) {
742
return ignore_write(vcpu, p);
743
} else {
744
p->regval = read_sysreg(dbgauthstatus_el1);
745
return true;
746
}
747
}
748
749
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
750
struct sys_reg_params *p,
751
const struct sys_reg_desc *r)
752
{
753
access_rw(vcpu, p, r);
754
755
kvm_debug_set_guest_ownership(vcpu);
756
return true;
757
}
758
759
/*
760
* reg_to_dbg/dbg_to_reg
761
*
762
* A 32-bit write to a debug register leaves the top bits alone.
* A 32-bit read from a debug register only returns the bottom bits.
764
*/
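/*
 * For example, with an AA32_LO mapping, writing 0xdeadbeef to a register
 * currently holding 0x1111111122222222 results in 0x11111111deadbeef.
 */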
765
static void reg_to_dbg(struct kvm_vcpu *vcpu,
766
struct sys_reg_params *p,
767
const struct sys_reg_desc *rd,
768
u64 *dbg_reg)
769
{
770
u64 mask, shift, val;
771
772
get_access_mask(rd, &mask, &shift);
773
774
val = *dbg_reg;
775
val &= ~mask;
776
val |= (p->regval & (mask >> shift)) << shift;
777
*dbg_reg = val;
778
}
779
780
static void dbg_to_reg(struct kvm_vcpu *vcpu,
781
struct sys_reg_params *p,
782
const struct sys_reg_desc *rd,
783
u64 *dbg_reg)
784
{
785
u64 mask, shift;
786
787
get_access_mask(rd, &mask, &shift);
788
p->regval = (*dbg_reg & mask) >> shift;
789
}
790
791
static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
792
{
793
struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
794
795
switch (rd->Op2) {
796
case 0b100:
797
return &dbg->dbg_bvr[rd->CRm];
798
case 0b101:
799
return &dbg->dbg_bcr[rd->CRm];
800
case 0b110:
801
return &dbg->dbg_wvr[rd->CRm];
802
case 0b111:
803
return &dbg->dbg_wcr[rd->CRm];
804
default:
805
KVM_BUG_ON(1, vcpu->kvm);
806
return NULL;
807
}
808
}
809
810
static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
811
const struct sys_reg_desc *rd)
812
{
813
u64 *reg = demux_wb_reg(vcpu, rd);
814
815
if (!reg)
816
return false;
817
818
if (p->is_write)
819
reg_to_dbg(vcpu, p, rd, reg);
820
else
821
dbg_to_reg(vcpu, p, rd, reg);
822
823
kvm_debug_set_guest_ownership(vcpu);
824
return true;
825
}
826
827
static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
828
u64 val)
829
{
830
u64 *reg = demux_wb_reg(vcpu, rd);
831
832
if (!reg)
833
return -EINVAL;
834
835
*reg = val;
836
return 0;
837
}
838
839
static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
840
u64 *val)
841
{
842
u64 *reg = demux_wb_reg(vcpu, rd);
843
844
if (!reg)
845
return -EINVAL;
846
847
*val = *reg;
848
return 0;
849
}
850
851
static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
852
{
853
u64 *reg = demux_wb_reg(vcpu, rd);
854
855
/*
856
* Bail early if we couldn't find storage for the register, the
857
* KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
858
* being run.
859
*/
860
if (!reg)
861
return 0;
862
863
*reg = rd->val;
864
return rd->val;
865
}
866
867
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
868
{
869
u64 amair = read_sysreg(amair_el1);
870
vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
871
return amair;
872
}
873
874
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
875
{
876
u64 actlr = read_sysreg(actlr_el1);
877
vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
878
return actlr;
879
}
880
881
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
882
{
883
u64 mpidr;
884
885
/*
886
* Map the vcpu_id into the first three affinity level fields of
* the MPIDR. We limit the number of VCPUs in affinity level 0 to 16,
* as the GICv3 ICC_SGIxR registers can only address 16 CPUs at that
* level when sending IPIs directly to each CPU.
891
*/
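/*
 * For example, vcpu_id 300 (0x12c) becomes Aff0 = 0x0c, Aff1 = 0x12,
 * Aff2 = 0x00, giving MPIDR_EL1 = 0x8000120c once the RES1 bit 31 is set.
 */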
892
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
893
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
894
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
895
mpidr |= (1ULL << 31);
896
vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
897
898
return mpidr;
899
}
900
901
static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
902
const struct sys_reg_desc *r)
903
{
904
return REG_HIDDEN;
905
}
906
907
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
908
const struct sys_reg_desc *r)
909
{
910
if (kvm_vcpu_has_pmu(vcpu))
911
return 0;
912
913
return REG_HIDDEN;
914
}
915
916
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
917
{
918
u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
919
u8 n = vcpu->kvm->arch.nr_pmu_counters;
920
921
if (n)
922
mask |= GENMASK(n - 1, 0);
923
924
reset_unknown(vcpu, r);
925
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
926
927
return __vcpu_sys_reg(vcpu, r->reg);
928
}
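/*
 * As an example, a VM configured with 6 event counters ends up with
 * mask == BIT(31) | GENMASK(5, 0) == 0x8000003f, i.e. the cycle counter
 * plus counters 0-5.
 */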
929
930
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
931
{
932
reset_unknown(vcpu, r);
933
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
934
935
return __vcpu_sys_reg(vcpu, r->reg);
936
}
937
938
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
939
{
940
/* This thing will UNDEF, who cares about the reset value? */
941
if (!kvm_vcpu_has_pmu(vcpu))
942
return 0;
943
944
reset_unknown(vcpu, r);
945
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
946
947
return __vcpu_sys_reg(vcpu, r->reg);
948
}
949
950
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
951
{
952
reset_unknown(vcpu, r);
953
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
954
955
return __vcpu_sys_reg(vcpu, r->reg);
956
}
957
958
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
959
{
960
u64 pmcr = 0;
961
962
if (!kvm_supports_32bit_el0())
963
pmcr |= ARMV8_PMU_PMCR_LC;
964
965
/*
966
* The value of PMCR.N field is included when the
967
* vCPU register is read via kvm_vcpu_read_pmcr().
968
*/
969
__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
970
971
return __vcpu_sys_reg(vcpu, r->reg);
972
}
973
974
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
975
{
976
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
977
bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
978
979
if (!enabled)
980
kvm_inject_undefined(vcpu);
981
982
return !enabled;
983
}
984
985
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
986
{
987
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
988
}
989
990
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
991
{
992
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
993
}
994
995
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
996
{
997
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
998
}
999
1000
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1001
{
1002
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1003
}
1004
1005
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1006
const struct sys_reg_desc *r)
1007
{
1008
u64 val;
1009
1010
if (pmu_access_el0_disabled(vcpu))
1011
return false;
1012
1013
if (p->is_write) {
1014
/*
1015
* Only update writeable bits of PMCR (continuing into
1016
* kvm_pmu_handle_pmcr() as well)
1017
*/
1018
val = kvm_vcpu_read_pmcr(vcpu);
1019
val &= ~ARMV8_PMU_PMCR_MASK;
1020
val |= p->regval & ARMV8_PMU_PMCR_MASK;
1021
if (!kvm_supports_32bit_el0())
1022
val |= ARMV8_PMU_PMCR_LC;
1023
kvm_pmu_handle_pmcr(vcpu, val);
1024
} else {
1025
/* PMCR.P & PMCR.C are RAZ */
1026
val = kvm_vcpu_read_pmcr(vcpu)
1027
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1028
p->regval = val;
1029
}
1030
1031
return true;
1032
}
1033
1034
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1035
const struct sys_reg_desc *r)
1036
{
1037
if (pmu_access_event_counter_el0_disabled(vcpu))
1038
return false;
1039
1040
if (p->is_write)
1041
__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1042
else
1043
/* return PMSELR.SEL field */
1044
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1045
& PMSELR_EL0_SEL_MASK;
1046
1047
return true;
1048
}
1049
1050
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1051
const struct sys_reg_desc *r)
1052
{
1053
u64 pmceid, mask, shift;
1054
1055
BUG_ON(p->is_write);
1056
1057
if (pmu_access_el0_disabled(vcpu))
1058
return false;
1059
1060
get_access_mask(r, &mask, &shift);
1061
1062
pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1063
pmceid &= mask;
1064
pmceid >>= shift;
1065
1066
p->regval = pmceid;
1067
1068
return true;
1069
}
1070
1071
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1072
{
1073
u64 pmcr, val;
1074
1075
pmcr = kvm_vcpu_read_pmcr(vcpu);
1076
val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1077
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1078
kvm_inject_undefined(vcpu);
1079
return false;
1080
}
1081
1082
return true;
1083
}
1084
1085
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1086
u64 *val)
1087
{
1088
u64 idx;
1089
1090
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1091
/* PMCCNTR_EL0 */
1092
idx = ARMV8_PMU_CYCLE_IDX;
1093
else
1094
/* PMEVCNTRn_EL0 */
1095
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1096
1097
*val = kvm_pmu_get_counter_value(vcpu, idx);
1098
return 0;
1099
}
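/*
 * The PMEVCNTRn_EL0 index is reassembled from the encoding: n[4:3] sits in
 * CRm[1:0] and n[2:0] in Op2, so e.g. PMEVCNTR10_EL0 (CRm == 0b1001,
 * Op2 == 0b010) decodes to idx = (1 << 3) | 2 = 10.
 */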
1100
1101
static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1102
u64 val)
1103
{
1104
u64 idx;
1105
1106
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1107
/* PMCCNTR_EL0 */
1108
idx = ARMV8_PMU_CYCLE_IDX;
1109
else
1110
/* PMEVCNTRn_EL0 */
1111
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1112
1113
kvm_pmu_set_counter_value_user(vcpu, idx, val);
1114
return 0;
1115
}
1116
1117
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1118
struct sys_reg_params *p,
1119
const struct sys_reg_desc *r)
1120
{
1121
u64 idx = ~0UL;
1122
1123
if (r->CRn == 9 && r->CRm == 13) {
1124
if (r->Op2 == 2) {
1125
/* PMXEVCNTR_EL0 */
1126
if (pmu_access_event_counter_el0_disabled(vcpu))
1127
return false;
1128
1129
idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1130
__vcpu_sys_reg(vcpu, PMSELR_EL0));
1131
} else if (r->Op2 == 0) {
1132
/* PMCCNTR_EL0 */
1133
if (pmu_access_cycle_counter_el0_disabled(vcpu))
1134
return false;
1135
1136
idx = ARMV8_PMU_CYCLE_IDX;
1137
}
1138
} else if (r->CRn == 0 && r->CRm == 9) {
1139
/* PMCCNTR */
1140
if (pmu_access_event_counter_el0_disabled(vcpu))
1141
return false;
1142
1143
idx = ARMV8_PMU_CYCLE_IDX;
1144
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1145
/* PMEVCNTRn_EL0 */
1146
if (pmu_access_event_counter_el0_disabled(vcpu))
1147
return false;
1148
1149
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1150
}
1151
1152
/* Catch any decoding mistake */
1153
WARN_ON(idx == ~0UL);
1154
1155
if (!pmu_counter_idx_valid(vcpu, idx))
1156
return false;
1157
1158
if (p->is_write) {
1159
if (pmu_access_el0_disabled(vcpu))
1160
return false;
1161
1162
kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1163
} else {
1164
p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1165
}
1166
1167
return true;
1168
}
1169
1170
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1171
const struct sys_reg_desc *r)
1172
{
1173
u64 idx, reg;
1174
1175
if (pmu_access_el0_disabled(vcpu))
1176
return false;
1177
1178
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1179
/* PMXEVTYPER_EL0 */
1180
idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1181
reg = PMEVTYPER0_EL0 + idx;
1182
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1183
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1184
if (idx == ARMV8_PMU_CYCLE_IDX)
1185
reg = PMCCFILTR_EL0;
1186
else
1187
/* PMEVTYPERn_EL0 */
1188
reg = PMEVTYPER0_EL0 + idx;
1189
} else {
1190
BUG();
1191
}
1192
1193
if (!pmu_counter_idx_valid(vcpu, idx))
1194
return false;
1195
1196
if (p->is_write) {
1197
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1198
kvm_vcpu_pmu_restore_guest(vcpu);
1199
} else {
1200
p->regval = __vcpu_sys_reg(vcpu, reg);
1201
}
1202
1203
return true;
1204
}
1205
1206
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1207
{
1208
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1209
1210
__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1211
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1212
1213
return 0;
1214
}
1215
1216
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1217
{
1218
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1219
1220
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1221
return 0;
1222
}
1223
1224
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1225
const struct sys_reg_desc *r)
1226
{
1227
u64 val, mask;
1228
1229
if (pmu_access_el0_disabled(vcpu))
1230
return false;
1231
1232
mask = kvm_pmu_accessible_counter_mask(vcpu);
1233
if (p->is_write) {
1234
val = p->regval & mask;
1235
if (r->Op2 & 0x1)
1236
/* accessing PMCNTENSET_EL0 */
1237
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1238
else
1239
/* accessing PMCNTENCLR_EL0 */
1240
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1241
1242
kvm_pmu_reprogram_counter_mask(vcpu, val);
1243
} else {
1244
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1245
}
1246
1247
return true;
1248
}
1249
1250
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1251
const struct sys_reg_desc *r)
1252
{
1253
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1254
1255
if (check_pmu_access_disabled(vcpu, 0))
1256
return false;
1257
1258
if (p->is_write) {
1259
u64 val = p->regval & mask;
1260
1261
if (r->Op2 & 0x1)
1262
/* accessing PMINTENSET_EL1 */
1263
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1264
else
1265
/* accessing PMINTENCLR_EL1 */
1266
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1267
} else {
1268
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1269
}
1270
1271
return true;
1272
}
1273
1274
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1275
const struct sys_reg_desc *r)
1276
{
1277
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1278
1279
if (pmu_access_el0_disabled(vcpu))
1280
return false;
1281
1282
if (p->is_write) {
1283
if (r->CRm & 0x2)
1284
/* accessing PMOVSSET_EL0 */
1285
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1286
else
1287
/* accessing PMOVSCLR_EL0 */
1288
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1289
} else {
1290
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1291
}
1292
1293
return true;
1294
}
1295
1296
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1297
const struct sys_reg_desc *r)
1298
{
1299
u64 mask;
1300
1301
if (!p->is_write)
1302
return read_from_write_only(vcpu, p, r);
1303
1304
if (pmu_write_swinc_el0_disabled(vcpu))
1305
return false;
1306
1307
mask = kvm_pmu_accessible_counter_mask(vcpu);
1308
kvm_pmu_software_increment(vcpu, p->regval & mask);
1309
return true;
1310
}
1311
1312
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1313
const struct sys_reg_desc *r)
1314
{
1315
if (p->is_write) {
1316
if (!vcpu_mode_priv(vcpu))
1317
return undef_access(vcpu, p, r);
1318
1319
__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1320
(p->regval & ARMV8_PMU_USERENR_MASK));
1321
} else {
1322
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1323
& ARMV8_PMU_USERENR_MASK;
1324
}
1325
1326
return true;
1327
}
1328
1329
static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1330
u64 *val)
1331
{
1332
*val = kvm_vcpu_read_pmcr(vcpu);
1333
return 0;
1334
}
1335
1336
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1337
u64 val)
1338
{
1339
u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1340
struct kvm *kvm = vcpu->kvm;
1341
1342
mutex_lock(&kvm->arch.config_lock);
1343
1344
/*
1345
* The vCPU can't have more counters than the PMU hardware
1346
* implements. Ignore this error to maintain compatibility
1347
* with the existing KVM behavior.
1348
*/
1349
if (!kvm_vm_has_ran_once(kvm) &&
1350
!vcpu_has_nv(vcpu) &&
1351
new_n <= kvm_arm_pmu_get_max_counters(kvm))
1352
kvm->arch.nr_pmu_counters = new_n;
1353
1354
mutex_unlock(&kvm->arch.config_lock);
1355
1356
/*
1357
* Ignore writes to RES0 bits, read-only bits that are cleared on
* vCPU reset, and writable bits that KVM doesn't support yet.
1359
* (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1360
* The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1361
* But, we leave the bit as it is here, as the vCPU's PMUver might
1362
* be changed later (NOTE: the bit will be cleared on first vCPU run
1363
* if necessary).
1364
*/
1365
val &= ARMV8_PMU_PMCR_MASK;
1366
1367
/* The LC bit is RES1 when AArch32 is not supported */
1368
if (!kvm_supports_32bit_el0())
1369
val |= ARMV8_PMU_PMCR_LC;
1370
1371
__vcpu_assign_sys_reg(vcpu, r->reg, val);
1372
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1373
1374
return 0;
1375
}
1376
1377
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1378
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
1379
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
1380
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1381
get_dbg_wb_reg, set_dbg_wb_reg }, \
1382
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
1383
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1384
get_dbg_wb_reg, set_dbg_wb_reg }, \
1385
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
1386
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1387
get_dbg_wb_reg, set_dbg_wb_reg }, \
1388
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
1389
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1390
get_dbg_wb_reg, set_dbg_wb_reg }
1391
1392
#define PMU_SYS_REG(name) \
1393
SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
1394
.visibility = pmu_visibility
1395
1396
/* Macro to expand the PMEVCNTRn_EL0 register */
1397
#define PMU_PMEVCNTR_EL0(n) \
1398
{ PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
1399
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
1400
.set_user = set_pmu_evcntr, \
1401
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1402
1403
/* Macro to expand the PMEVTYPERn_EL0 register */
1404
#define PMU_PMEVTYPER_EL0(n) \
1405
{ PMU_SYS_REG(PMEVTYPERn_EL0(n)), \
1406
.reset = reset_pmevtyper, \
1407
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1408
1409
/* Macro to expand the AMU counter and type registers*/
1410
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1411
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1412
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1413
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1414
1415
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1416
const struct sys_reg_desc *rd)
1417
{
1418
return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1419
}
1420
1421
/*
1422
* If we land here on a PtrAuth access, that is because we didn't
1423
* fixup the access on exit by allowing the PtrAuth sysregs. The only
1424
* way this happens is when the guest does not have PtrAuth support
1425
* enabled.
1426
*/
1427
#define __PTRAUTH_KEY(k) \
1428
{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
1429
.visibility = ptrauth_visibility}
1430
1431
#define PTRAUTH_KEY(k) \
1432
__PTRAUTH_KEY(k ## KEYLO_EL1), \
1433
__PTRAUTH_KEY(k ## KEYHI_EL1)
1434
1435
static bool access_arch_timer(struct kvm_vcpu *vcpu,
1436
struct sys_reg_params *p,
1437
const struct sys_reg_desc *r)
1438
{
1439
enum kvm_arch_timers tmr;
1440
enum kvm_arch_timer_regs treg;
1441
u64 reg = reg_to_encoding(r);
1442
1443
switch (reg) {
1444
case SYS_CNTP_TVAL_EL0:
1445
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1446
tmr = TIMER_HPTIMER;
1447
else
1448
tmr = TIMER_PTIMER;
1449
treg = TIMER_REG_TVAL;
1450
break;
1451
1452
case SYS_CNTV_TVAL_EL0:
1453
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1454
tmr = TIMER_HVTIMER;
1455
else
1456
tmr = TIMER_VTIMER;
1457
treg = TIMER_REG_TVAL;
1458
break;
1459
1460
case SYS_AARCH32_CNTP_TVAL:
1461
case SYS_CNTP_TVAL_EL02:
1462
tmr = TIMER_PTIMER;
1463
treg = TIMER_REG_TVAL;
1464
break;
1465
1466
case SYS_CNTV_TVAL_EL02:
1467
tmr = TIMER_VTIMER;
1468
treg = TIMER_REG_TVAL;
1469
break;
1470
1471
case SYS_CNTHP_TVAL_EL2:
1472
tmr = TIMER_HPTIMER;
1473
treg = TIMER_REG_TVAL;
1474
break;
1475
1476
case SYS_CNTHV_TVAL_EL2:
1477
tmr = TIMER_HVTIMER;
1478
treg = TIMER_REG_TVAL;
1479
break;
1480
1481
case SYS_CNTP_CTL_EL0:
1482
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1483
tmr = TIMER_HPTIMER;
1484
else
1485
tmr = TIMER_PTIMER;
1486
treg = TIMER_REG_CTL;
1487
break;
1488
1489
case SYS_CNTV_CTL_EL0:
1490
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1491
tmr = TIMER_HVTIMER;
1492
else
1493
tmr = TIMER_VTIMER;
1494
treg = TIMER_REG_CTL;
1495
break;
1496
1497
case SYS_AARCH32_CNTP_CTL:
1498
case SYS_CNTP_CTL_EL02:
1499
tmr = TIMER_PTIMER;
1500
treg = TIMER_REG_CTL;
1501
break;
1502
1503
case SYS_CNTV_CTL_EL02:
1504
tmr = TIMER_VTIMER;
1505
treg = TIMER_REG_CTL;
1506
break;
1507
1508
case SYS_CNTHP_CTL_EL2:
1509
tmr = TIMER_HPTIMER;
1510
treg = TIMER_REG_CTL;
1511
break;
1512
1513
case SYS_CNTHV_CTL_EL2:
1514
tmr = TIMER_HVTIMER;
1515
treg = TIMER_REG_CTL;
1516
break;
1517
1518
case SYS_CNTP_CVAL_EL0:
1519
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1520
tmr = TIMER_HPTIMER;
1521
else
1522
tmr = TIMER_PTIMER;
1523
treg = TIMER_REG_CVAL;
1524
break;
1525
1526
case SYS_CNTV_CVAL_EL0:
1527
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1528
tmr = TIMER_HVTIMER;
1529
else
1530
tmr = TIMER_VTIMER;
1531
treg = TIMER_REG_CVAL;
1532
break;
1533
1534
case SYS_AARCH32_CNTP_CVAL:
1535
case SYS_CNTP_CVAL_EL02:
1536
tmr = TIMER_PTIMER;
1537
treg = TIMER_REG_CVAL;
1538
break;
1539
1540
case SYS_CNTV_CVAL_EL02:
1541
tmr = TIMER_VTIMER;
1542
treg = TIMER_REG_CVAL;
1543
break;
1544
1545
case SYS_CNTHP_CVAL_EL2:
1546
tmr = TIMER_HPTIMER;
1547
treg = TIMER_REG_CVAL;
1548
break;
1549
1550
case SYS_CNTHV_CVAL_EL2:
1551
tmr = TIMER_HVTIMER;
1552
treg = TIMER_REG_CVAL;
1553
break;
1554
1555
case SYS_CNTPCT_EL0:
1556
case SYS_CNTPCTSS_EL0:
1557
if (is_hyp_ctxt(vcpu))
1558
tmr = TIMER_HPTIMER;
1559
else
1560
tmr = TIMER_PTIMER;
1561
treg = TIMER_REG_CNT;
1562
break;
1563
1564
case SYS_AARCH32_CNTPCT:
1565
case SYS_AARCH32_CNTPCTSS:
1566
tmr = TIMER_PTIMER;
1567
treg = TIMER_REG_CNT;
1568
break;
1569
1570
case SYS_CNTVCT_EL0:
1571
case SYS_CNTVCTSS_EL0:
1572
if (is_hyp_ctxt(vcpu))
1573
tmr = TIMER_HVTIMER;
1574
else
1575
tmr = TIMER_VTIMER;
1576
treg = TIMER_REG_CNT;
1577
break;
1578
1579
case SYS_AARCH32_CNTVCT:
1580
case SYS_AARCH32_CNTVCTSS:
1581
tmr = TIMER_VTIMER;
1582
treg = TIMER_REG_CNT;
1583
break;
1584
1585
default:
1586
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1587
return undef_access(vcpu, p, r);
1588
}
1589
1590
if (p->is_write)
1591
kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1592
else
1593
p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1594
1595
return true;
1596
}
1597
1598
static bool access_hv_timer(struct kvm_vcpu *vcpu,
1599
struct sys_reg_params *p,
1600
const struct sys_reg_desc *r)
1601
{
1602
if (!vcpu_el2_e2h_is_set(vcpu))
1603
return undef_access(vcpu, p, r);
1604
1605
return access_arch_timer(vcpu, p, r);
1606
}
1607
1608
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1609
s64 new, s64 cur)
1610
{
1611
struct arm64_ftr_bits kvm_ftr = *ftrp;
1612
1613
/* Some features have a different safe-value type in KVM than in the host */
1614
switch (id) {
1615
case SYS_ID_AA64DFR0_EL1:
1616
switch (kvm_ftr.shift) {
1617
case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1618
kvm_ftr.type = FTR_LOWER_SAFE;
1619
break;
1620
case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1621
kvm_ftr.type = FTR_LOWER_SAFE;
1622
break;
1623
}
1624
break;
1625
case SYS_ID_DFR0_EL1:
1626
if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1627
kvm_ftr.type = FTR_LOWER_SAFE;
1628
break;
1629
}
1630
1631
return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1632
}
1633
1634
/*
1635
* arm64_check_features() - Check if a feature register value constitutes
1636
* a subset of features indicated by the idreg's KVM sanitised limit.
1637
*
1638
* This function checks whether each feature field of @val is the "safe" value
* against the idreg's KVM sanitised limit returned from the reset() callback.
* If a field value in @val is the same as the one in limit, it is always
* considered the safe value. For register fields that are not writable,
* only the value in limit is considered the safe value.
1643
*
1644
* Return: 0 if all the fields are safe. Otherwise, return negative errno.
1645
*/
1646
static int arm64_check_features(struct kvm_vcpu *vcpu,
1647
const struct sys_reg_desc *rd,
1648
u64 val)
1649
{
1650
const struct arm64_ftr_reg *ftr_reg;
1651
const struct arm64_ftr_bits *ftrp = NULL;
1652
u32 id = reg_to_encoding(rd);
1653
u64 writable_mask = rd->val;
1654
u64 limit = rd->reset(vcpu, rd);
1655
u64 mask = 0;
1656
1657
/*
1658
* Hidden and unallocated ID registers may not have a corresponding
1659
* struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1660
* only safe value is 0.
1661
*/
1662
if (sysreg_visible_as_raz(vcpu, rd))
1663
return val ? -E2BIG : 0;
1664
1665
ftr_reg = get_arm64_ftr_reg(id);
1666
if (!ftr_reg)
1667
return -EINVAL;
1668
1669
ftrp = ftr_reg->ftr_bits;
1670
1671
for (; ftrp && ftrp->width; ftrp++) {
1672
s64 f_val, f_lim, safe_val;
1673
u64 ftr_mask;
1674
1675
ftr_mask = arm64_ftr_mask(ftrp);
1676
if ((ftr_mask & writable_mask) != ftr_mask)
1677
continue;
1678
1679
f_val = arm64_ftr_value(ftrp, val);
1680
f_lim = arm64_ftr_value(ftrp, limit);
1681
mask |= ftr_mask;
1682
1683
if (f_val == f_lim)
1684
safe_val = f_val;
1685
else
1686
safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1687
1688
if (safe_val != f_val)
1689
return -E2BIG;
1690
}
1691
1692
/* For fields that are not writable, values in limit are the safe values. */
1693
if ((val & ~mask) != (limit & ~mask))
1694
return -E2BIG;
1695
1696
return 0;
1697
}
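/*
 * For a field with a FTR_LOWER_SAFE policy (PMUVer, for instance), a
 * userspace value strictly above the sanitised limit yields
 * safe_val == f_lim != f_val, and the write is refused with -E2BIG.
 */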
1698
1699
static u8 pmuver_to_perfmon(u8 pmuver)
1700
{
1701
switch (pmuver) {
1702
case ID_AA64DFR0_EL1_PMUVer_IMP:
1703
return ID_DFR0_EL1_PerfMon_PMUv3;
1704
case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1705
return ID_DFR0_EL1_PerfMon_IMPDEF;
1706
default:
1707
/* Anything ARMv8.1+ and NI have the same value. For now. */
1708
return pmuver;
1709
}
1710
}
1711
1712
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1713
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1714
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1715
1716
/* Read a sanitised cpufeature ID register by sys_reg_desc */
1717
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1718
const struct sys_reg_desc *r)
1719
{
1720
u32 id = reg_to_encoding(r);
1721
u64 val;
1722
1723
if (sysreg_visible_as_raz(vcpu, r))
1724
return 0;
1725
1726
val = read_sanitised_ftr_reg(id);
1727
1728
switch (id) {
1729
case SYS_ID_AA64DFR0_EL1:
1730
val = sanitise_id_aa64dfr0_el1(vcpu, val);
1731
break;
1732
case SYS_ID_AA64PFR0_EL1:
1733
val = sanitise_id_aa64pfr0_el1(vcpu, val);
1734
break;
1735
case SYS_ID_AA64PFR1_EL1:
1736
val = sanitise_id_aa64pfr1_el1(vcpu, val);
1737
break;
1738
case SYS_ID_AA64PFR2_EL1:
1739
val &= ID_AA64PFR2_EL1_FPMR |
1740
(kvm_has_mte(vcpu->kvm) ?
1741
ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
1742
0);
1743
break;
1744
case SYS_ID_AA64ISAR1_EL1:
1745
if (!vcpu_has_ptrauth(vcpu))
1746
val &= ~(ID_AA64ISAR1_EL1_APA |
1747
ID_AA64ISAR1_EL1_API |
1748
ID_AA64ISAR1_EL1_GPA |
1749
ID_AA64ISAR1_EL1_GPI);
1750
break;
1751
case SYS_ID_AA64ISAR2_EL1:
1752
if (!vcpu_has_ptrauth(vcpu))
1753
val &= ~(ID_AA64ISAR2_EL1_APA3 |
1754
ID_AA64ISAR2_EL1_GPA3);
1755
if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1756
has_broken_cntvoff())
1757
val &= ~ID_AA64ISAR2_EL1_WFxT;
1758
break;
1759
case SYS_ID_AA64ISAR3_EL1:
1760
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
1761
ID_AA64ISAR3_EL1_FAMINMAX;
1762
break;
1763
case SYS_ID_AA64MMFR2_EL1:
1764
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1765
val &= ~ID_AA64MMFR2_EL1_NV;
1766
break;
1767
case SYS_ID_AA64MMFR3_EL1:
1768
val &= ID_AA64MMFR3_EL1_TCRX |
1769
ID_AA64MMFR3_EL1_SCTLRX |
1770
ID_AA64MMFR3_EL1_S1POE |
1771
ID_AA64MMFR3_EL1_S1PIE;
1772
break;
1773
case SYS_ID_MMFR4_EL1:
1774
val &= ~ID_MMFR4_EL1_CCIDX;
1775
break;
1776
}
1777
1778
if (vcpu_has_nv(vcpu))
1779
val = limit_nv_id_reg(vcpu->kvm, id, val);
1780
1781
return val;
1782
}
1783
1784
static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1785
const struct sys_reg_desc *r)
1786
{
1787
return __kvm_read_sanitised_id_reg(vcpu, r);
1788
}
1789
1790
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1791
{
1792
return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1793
}
1794
1795
static bool is_feature_id_reg(u32 encoding)
1796
{
1797
return (sys_reg_Op0(encoding) == 3 &&
1798
(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1799
sys_reg_CRn(encoding) == 0 &&
1800
sys_reg_CRm(encoding) <= 7);
1801
}
1802
1803
/*
1804
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1805
* (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1806
* registers KVM maintains on a per-VM basis.
1807
*
1808
* Additionally, the implementation ID registers and CTR_EL0 are handled as
1809
* per-VM registers.
1810
*/
1811
static inline bool is_vm_ftr_id_reg(u32 id)
1812
{
1813
switch (id) {
1814
case SYS_CTR_EL0:
1815
case SYS_MIDR_EL1:
1816
case SYS_REVIDR_EL1:
1817
case SYS_AIDR_EL1:
1818
return true;
1819
default:
1820
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1821
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1822
sys_reg_CRm(id) < 8);
1823
1824
}
1825
}
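/*
 * For example, ID_AA64PFR0_EL1 encodes as (Op0, Op1, CRn, CRm, Op2) =
 * (3, 0, 0, 4, 0), so it is handled as a per-VM ID register.
 */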
1826
1827
static inline bool is_vcpu_ftr_id_reg(u32 id)
1828
{
1829
return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1830
}
1831
1832
static inline bool is_aa32_id_reg(u32 id)
1833
{
1834
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1835
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1836
sys_reg_CRm(id) <= 3);
1837
}
1838
1839
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1840
const struct sys_reg_desc *r)
1841
{
1842
u32 id = reg_to_encoding(r);
1843
1844
switch (id) {
1845
case SYS_ID_AA64ZFR0_EL1:
1846
if (!vcpu_has_sve(vcpu))
1847
return REG_RAZ;
1848
break;
1849
}
1850
1851
return 0;
1852
}
1853
1854
static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1855
const struct sys_reg_desc *r)
1856
{
1857
/*
1858
* AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1859
* EL. Promote to RAZ/WI in order to guarantee consistency between
1860
* systems.
1861
*/
1862
if (!kvm_supports_32bit_el0())
1863
return REG_RAZ | REG_USER_WI;
1864
1865
return id_visibility(vcpu, r);
1866
}
1867
1868
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1869
const struct sys_reg_desc *r)
1870
{
1871
return REG_RAZ;
1872
}
1873
1874
/* cpufeature ID register access trap handlers */
1875
1876
static bool access_id_reg(struct kvm_vcpu *vcpu,
1877
struct sys_reg_params *p,
1878
const struct sys_reg_desc *r)
1879
{
1880
if (p->is_write)
1881
return write_to_read_only(vcpu, p, r);
1882
1883
p->regval = read_id_reg(vcpu, r);
1884
1885
return true;
1886
}
1887
1888
/* Visibility overrides for SVE-specific control registers */
1889
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1890
const struct sys_reg_desc *rd)
1891
{
1892
if (vcpu_has_sve(vcpu))
1893
return 0;
1894
1895
return REG_HIDDEN;
1896
}
1897
1898
static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1899
const struct sys_reg_desc *rd)
1900
{
1901
if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1902
return 0;
1903
1904
return REG_HIDDEN;
1905
}
1906
1907
static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1908
const struct sys_reg_desc *rd)
1909
{
1910
if (kvm_has_fpmr(vcpu->kvm))
1911
return 0;
1912
1913
return REG_HIDDEN;
1914
}
1915
1916
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1917
{
1918
if (!vcpu_has_sve(vcpu))
1919
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1920
1921
/*
1922
* The default is to expose CSV2 == 1 if the HW isn't affected.
1923
* Although this is a per-CPU feature, we make it global because
1924
* asymmetric systems are just a nuisance.
1925
*
1926
* Userspace can override this as long as it doesn't promise
1927
* the impossible.
1928
*/
1929
if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1930
val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1931
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1932
}
1933
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1934
val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1935
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1936
}
1937
1938
if (vgic_is_v3(vcpu->kvm)) {
1939
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1940
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1941
}
1942
1943
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1944
1945
/*
1946
* MPAM is disabled by default as KVM also needs a set of PARTID to
1947
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
1948
* older kernels let the guest see the ID bit.
1949
*/
1950
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
1951
1952
return val;
1953
}
1954
1955
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
1956
{
1957
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1958
1959
if (!kvm_has_mte(vcpu->kvm)) {
1960
val &= ~ID_AA64PFR1_EL1_MTE;
1961
val &= ~ID_AA64PFR1_EL1_MTE_frac;
1962
}
1963
1964
if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
1965
SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
1966
val &= ~ID_AA64PFR1_EL1_RAS_frac;
1967
1968
val &= ~ID_AA64PFR1_EL1_SME;
1969
val &= ~ID_AA64PFR1_EL1_RNDR_trap;
1970
val &= ~ID_AA64PFR1_EL1_NMI;
1971
val &= ~ID_AA64PFR1_EL1_GCS;
1972
val &= ~ID_AA64PFR1_EL1_THE;
1973
val &= ~ID_AA64PFR1_EL1_MTEX;
1974
val &= ~ID_AA64PFR1_EL1_PFAR;
1975
val &= ~ID_AA64PFR1_EL1_MPAM_frac;
1976
1977
return val;
1978
}
1979
1980
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1981
{
1982
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1983
1984
/*
1985
* Only initialize the PMU version if the vCPU was configured with one.
1986
*/
1987
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1988
if (kvm_vcpu_has_pmu(vcpu))
1989
val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
1990
kvm_arm_pmu_get_pmuver_limit());
1991
1992
/* Hide SPE from guests */
1993
val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
1994
1995
/* Hide BRBE from guests */
1996
val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
1997
1998
return val;
1999
}
2000
2001
/*
2002
* Older versions of KVM erroneously claim support for FEAT_DoubleLock with
2003
* NV-enabled VMs on unsupporting hardware. Silently ignore the incorrect
2004
* value if it is consistent with the bug.
2005
*/
2006
static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
2007
{
2008
u8 host, user;
2009
2010
if (!vcpu_has_nv(vcpu))
2011
return false;
2012
2013
host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
2014
read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
2015
user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
2016
2017
return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
2018
user == ID_AA64DFR0_EL1_DoubleLock_IMP;
2019
}
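/*
* Note, derived from the setter below: set_id_aa64dfr0_el1() uses this helper
* to accept the bogus DoubleLock == IMP value that older KVMs exposed for
* NV guests, quietly downgrading it to NI instead of rejecting the
* userspace write.
*/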
2020
2021
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
2022
const struct sys_reg_desc *rd,
2023
u64 val)
2024
{
2025
u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
2026
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
2027
2028
/*
2029
* Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
2030
* ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
2031
* exposed an IMP_DEF PMU to userspace and the guest on systems w/
2032
* non-architectural PMUs. Of course, PMUv3 is the only game in town for
2033
* PMU virtualization, so the IMP_DEF value was rather user-hostile.
2034
*
2035
* At minimum, we're on the hook to allow values that were given to
2036
* userspace by KVM. Cover our tracks here and replace the IMP_DEF value
2037
* with a more sensible NI. The value of an ID register changing under
2038
* the nose of the guest is unfortunate, but is certainly no more
2039
* surprising than an ill-guided PMU driver poking at impdef system
2040
* registers that end in an UNDEF...
2041
*/
2042
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
2043
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2044
2045
/*
2046
* ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
2047
* nonzero minimum safe value.
2048
*/
2049
if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
2050
return -EINVAL;
2051
2052
if (ignore_feat_doublelock(vcpu, val)) {
2053
val &= ~ID_AA64DFR0_EL1_DoubleLock;
2054
val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
2055
}
2056
2057
return set_id_reg(vcpu, rd, val);
2058
}
2059
2060
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
2061
const struct sys_reg_desc *rd)
2062
{
2063
u8 perfmon;
2064
u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
2065
2066
val &= ~ID_DFR0_EL1_PerfMon_MASK;
2067
if (kvm_vcpu_has_pmu(vcpu)) {
2068
perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
2069
val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
2070
}
2071
2072
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
2073
2074
return val;
2075
}
2076
2077
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
2078
const struct sys_reg_desc *rd,
2079
u64 val)
2080
{
2081
u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
2082
u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
2083
2084
if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
2085
val &= ~ID_DFR0_EL1_PerfMon_MASK;
2086
perfmon = 0;
2087
}
2088
2089
/*
2090
* Allow DFR0_EL1.PerfMon to be set from userspace as long as
2091
* it doesn't promise more than what the HW gives us on the
2092
* AArch64 side (as everything is emulated with that), and
2093
* that this is a PMUv3.
2094
*/
2095
if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
2096
return -EINVAL;
2097
2098
if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
2099
return -EINVAL;
2100
2101
return set_id_reg(vcpu, rd, val);
2102
}
2103
2104
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
2105
const struct sys_reg_desc *rd, u64 user_val)
2106
{
2107
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2108
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
2109
2110
/*
2111
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
2112
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
2113
* guests, but didn't add trap handling. KVM doesn't support MPAM and
2114
* always returns an UNDEF for these registers. The guest must see 0
2115
* for this field.
2116
*
2117
* But KVM must also accept values from user-space that were provided
2118
* by KVM. On CPUs that support MPAM, permit user-space to write
2119
* the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
2120
*/
2121
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2122
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2123
2124
/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
2125
if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
2126
!FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
2127
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
2128
return -EINVAL;
2129
2130
/*
2131
* If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
2132
* we support GICv3. Fail attempts to do anything but set that to IMP.
2133
*/
2134
if (vgic_is_v3_compat(vcpu->kvm) &&
2135
FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
2136
return -EINVAL;
2137
2138
return set_id_reg(vcpu, rd, user_val);
2139
}
2140
2141
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
2142
const struct sys_reg_desc *rd, u64 user_val)
2143
{
2144
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2145
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
2146
u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
2147
u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
2148
u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
2149
2150
/* See set_id_aa64pfr0_el1 for comment about MPAM */
2151
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2152
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
2153
2154
/*
2155
* Previously MTE_frac was hidden from the guest. However, if the
2156
* hardware supports MTE2 but not MTE_ASYM_FAULT, then a value
2157
* of 0 for this field indicates that the hardware supports
2158
* MTE_ASYNC, whereas 0xf indicates MTE_ASYNC is not supported.
2159
*
2160
* As KVM must accept values it previously provided to user-space,
2161
* allow user-space to set ID_AA64PFR1_EL1.MTE_frac to 0 when
2162
* ID_AA64PFR1_EL1.MTE is 2. However, ignore the write to avoid
2163
* incorrectly claiming hardware support for MTE_ASYNC in the
2164
* guest.
2165
*/
2166
2167
if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
2168
hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
2169
user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
2170
user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
2171
user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
2172
}
2173
2174
return set_id_reg(vcpu, rd, user_val);
2175
}
2176
2177
/*
2178
* Allow userspace to de-feature a stage-2 translation granule but prevent it
2179
* from claiming the impossible.
2180
*/
2181
#define tgran2_val_allowed(tg, safe, user) \
2182
({ \
2183
u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \
2184
u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
2185
\
2186
__s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \
2187
})
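/*
* For illustration (a sketch of the expansion, not verbatim preprocessor
* output), tgran2_val_allowed(TGRAN4_2, safe, user) boils down to:
*
*   ({
*           u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, TGRAN4_2, safe);
*           u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, TGRAN4_2, user);
*
*           __s == __u || __u == ID_AA64MMFR0_EL1_TGRAN4_2_NI;
*   })
*
* i.e. userspace may keep the sanitised host value or de-feature the
* granule, but may not claim anything else.
*/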
2188
2189
static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
2190
const struct sys_reg_desc *rd, u64 user_val)
2191
{
2192
u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
2193
2194
if (!vcpu_has_nv(vcpu))
2195
return set_id_reg(vcpu, rd, user_val);
2196
2197
if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
2198
!tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
2199
!tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
2200
return -EINVAL;
2201
2202
return set_id_reg(vcpu, rd, user_val);
2203
}
2204
2205
static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
2206
const struct sys_reg_desc *rd, u64 user_val)
2207
{
2208
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2209
u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
2210
2211
/*
2212
* We made the mistake of exposing the now deprecated NV field,
2213
* so allow userspace to write it, but silently ignore it.
2214
*/
2215
if ((hw_val & nv_mask) == (user_val & nv_mask))
2216
user_val &= ~nv_mask;
2217
2218
return set_id_reg(vcpu, rd, user_val);
2219
}
2220
2221
static int set_ctr_el0(struct kvm_vcpu *vcpu,
2222
const struct sys_reg_desc *rd, u64 user_val)
2223
{
2224
u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
2225
2226
/*
2227
* Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
2228
* Hence only allow setting VIPT(0b10) or PIPT(0b11) for L1Ip based
2229
* on what hardware reports.
2230
*
2231
* Using a VIPT software model on PIPT will lead to over-invalidation,
2232
* but remains correct. Hence, we can allow downgrading PIPT to VIPT,
2233
* but not the other way around. This is handled via arm64_ftr_safe_value()
2234
* as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
2235
* set as VIPT.
2236
*/
2237
switch (user_L1Ip) {
2238
case CTR_EL0_L1Ip_RESERVED_VPIPT:
2239
case CTR_EL0_L1Ip_RESERVED_AIVIVT:
2240
return -EINVAL;
2241
case CTR_EL0_L1Ip_VIPT:
2242
case CTR_EL0_L1Ip_PIPT:
2243
return set_id_reg(vcpu, rd, user_val);
2244
default:
2245
return -ENOENT;
2246
}
2247
}
2248
2249
/*
2250
* cpufeature ID register user accessors
2251
*
2252
* For now, these registers are immutable for userspace, so no values
2253
* are stored, and for set_id_reg() we don't allow the effective value
2254
* to be changed.
2255
*/
2256
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2257
u64 *val)
2258
{
2259
/*
2260
* Avoid locking if the VM has already started, as the ID registers are
2261
* guaranteed to be invariant at that point.
2262
*/
2263
if (kvm_vm_has_ran_once(vcpu->kvm)) {
2264
*val = read_id_reg(vcpu, rd);
2265
return 0;
2266
}
2267
2268
mutex_lock(&vcpu->kvm->arch.config_lock);
2269
*val = read_id_reg(vcpu, rd);
2270
mutex_unlock(&vcpu->kvm->arch.config_lock);
2271
2272
return 0;
2273
}
2274
2275
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2276
u64 val)
2277
{
2278
u32 id = reg_to_encoding(rd);
2279
int ret;
2280
2281
mutex_lock(&vcpu->kvm->arch.config_lock);
2282
2283
/*
2284
* Once the VM has started the ID registers are immutable. Reject any
2285
* write that does not match the final register value.
2286
*/
2287
if (kvm_vm_has_ran_once(vcpu->kvm)) {
2288
if (val != read_id_reg(vcpu, rd))
2289
ret = -EBUSY;
2290
else
2291
ret = 0;
2292
2293
mutex_unlock(&vcpu->kvm->arch.config_lock);
2294
return ret;
2295
}
2296
2297
ret = arm64_check_features(vcpu, rd, val);
2298
if (!ret)
2299
kvm_set_vm_id_reg(vcpu->kvm, id, val);
2300
2301
mutex_unlock(&vcpu->kvm->arch.config_lock);
2302
2303
/*
2304
* arm64_check_features() returns -E2BIG to indicate the register's
2305
* feature set is a superset of the maximally-allowed register value.
2306
* While it would be nice to precisely describe this to userspace, the
2307
* existing UAPI for KVM_SET_ONE_REG has it that invalid register
2308
* writes return -EINVAL.
2309
*/
2310
if (ret == -E2BIG)
2311
ret = -EINVAL;
2312
return ret;
2313
}
2314
2315
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
2316
{
2317
u64 *p = __vm_id_reg(&kvm->arch, reg);
2318
2319
lockdep_assert_held(&kvm->arch.config_lock);
2320
2321
if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
2322
return;
2323
2324
*p = val;
2325
}
2326
2327
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2328
u64 *val)
2329
{
2330
*val = 0;
2331
return 0;
2332
}
2333
2334
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2335
u64 val)
2336
{
2337
return 0;
2338
}
2339
2340
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2341
const struct sys_reg_desc *r)
2342
{
2343
if (p->is_write)
2344
return write_to_read_only(vcpu, p, r);
2345
2346
p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
2347
return true;
2348
}
2349
2350
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2351
const struct sys_reg_desc *r)
2352
{
2353
if (p->is_write)
2354
return write_to_read_only(vcpu, p, r);
2355
2356
p->regval = __vcpu_sys_reg(vcpu, r->reg);
2357
return true;
2358
}
2359
2360
/*
2361
* Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
2362
* by the physical CPU which the vcpu currently resides in.
2363
*/
2364
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2365
{
2366
u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2367
u64 clidr;
2368
u8 loc;
2369
2370
if ((ctr_el0 & CTR_EL0_IDC)) {
2371
/*
2372
* Data cache clean to the PoU is not required so LoUU and LoUIS
2373
* will not be set and a unified cache, which will be marked as
2374
* LoC, will be added.
2375
*
2376
* If not DIC, make the unified cache L2 so that an instruction
2377
* cache can be added as L1 later.
2378
*/
2379
loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
2380
clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
2381
} else {
2382
/*
2383
* Data cache clean to the PoU is required so let L1 have a data
2384
* cache and mark it as LoUU and LoUIS. As L1 has a data cache,
2385
* it can be marked as LoC too.
2386
*/
2387
loc = 1;
2388
clidr = 1 << CLIDR_LOUU_SHIFT;
2389
clidr |= 1 << CLIDR_LOUIS_SHIFT;
2390
clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
2391
}
2392
2393
/*
2394
* Instruction cache invalidation to the PoU is required so let L1 have
2395
* an instruction cache. If L1 already has a data cache, it will be
2396
* CACHE_TYPE_SEPARATE.
2397
*/
2398
if (!(ctr_el0 & CTR_EL0_DIC))
2399
clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
2400
2401
clidr |= loc << CLIDR_LOC_SHIFT;
2402
2403
/*
2404
* Add tag cache unified to data cache. Allocation tags and data are
2405
* unified in a cache line so that it looks valid even if there is only
2406
* one cache line.
2407
*/
2408
if (kvm_has_mte(vcpu->kvm))
2409
clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
2410
2411
__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
2412
2413
return __vcpu_sys_reg(vcpu, r->reg);
2414
}
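/*
* For illustration, the topologies fabricated above are:
*   - IDC && DIC:  a single unified L1 cache, which is also the LoC;
*                  LoUU and LoUIS stay 0.
*   - IDC && !DIC: an L1 instruction cache plus a unified L2 cache acting
*                  as the LoC; LoUU and LoUIS stay 0.
*   - !IDC:        an L1 data cache marked as LoUU, LoUIS and LoC, joined
*                  by an L1 instruction cache when DIC is also clear.
* When the VM has MTE, a unified allocation tag and data cache is also
* advertised at the LoC level.
*/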
2415
2416
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2417
u64 val)
2418
{
2419
u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2420
u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2421
2422
if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2423
return -EINVAL;
2424
2425
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
2426
2427
return 0;
2428
}
2429
2430
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2431
const struct sys_reg_desc *r)
2432
{
2433
int reg = r->reg;
2434
2435
if (p->is_write)
2436
vcpu_write_sys_reg(vcpu, p->regval, reg);
2437
else
2438
p->regval = vcpu_read_sys_reg(vcpu, reg);
2439
return true;
2440
}
2441
2442
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2443
const struct sys_reg_desc *r)
2444
{
2445
u32 csselr;
2446
2447
if (p->is_write)
2448
return write_to_read_only(vcpu, p, r);
2449
2450
csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2451
csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2452
if (csselr < CSSELR_MAX)
2453
p->regval = get_ccsidr(vcpu, csselr);
2454
2455
return true;
2456
}
2457
2458
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2459
const struct sys_reg_desc *rd)
2460
{
2461
if (kvm_has_mte(vcpu->kvm))
2462
return 0;
2463
2464
return REG_HIDDEN;
2465
}
2466
2467
#define MTE_REG(name) { \
2468
SYS_DESC(SYS_##name), \
2469
.access = undef_access, \
2470
.reset = reset_unknown, \
2471
.reg = name, \
2472
.visibility = mte_visibility, \
2473
}
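/*
* For illustration (a sketch of the expansion, not verbatim preprocessor
* output), MTE_REG(TFSR_EL1) yields:
*
*   {
*           SYS_DESC(SYS_TFSR_EL1),
*           .access     = undef_access,
*           .reset      = reset_unknown,
*           .reg        = TFSR_EL1,
*           .visibility = mte_visibility,
*   }
*
* so the register is only exposed when mte_visibility() says the VM has MTE,
* and any trapped access is handled by undef_access().
*/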
2474
2475
static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2476
const struct sys_reg_desc *rd)
2477
{
2478
if (vcpu_has_nv(vcpu))
2479
return 0;
2480
2481
return REG_HIDDEN;
2482
}
2483
2484
static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2485
struct sys_reg_params *p,
2486
const struct sys_reg_desc *r)
2487
{
2488
/*
2489
* We really shouldn't be here, and this is likely the result
2490
* of a misconfigured trap, as this register should target the
2491
* VNCR page, and nothing else.
2492
*/
2493
return bad_trap(vcpu, p, r,
2494
"trap of VNCR-backed register");
2495
}
2496
2497
static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2498
struct sys_reg_params *p,
2499
const struct sys_reg_desc *r)
2500
{
2501
/*
2502
* We really shouldn't be here, and this is likely the result
2503
* of a misconfigured trap, as this register should target the
2504
* corresponding EL1, and nothing else.
2505
*/
2506
return bad_trap(vcpu, p, r,
2507
"trap of EL2 register redirected to EL1");
2508
}
2509
2510
#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \
2511
SYS_DESC(SYS_##name), \
2512
.access = acc, \
2513
.reset = rst, \
2514
.reg = name, \
2515
.visibility = filter, \
2516
.val = v, \
2517
}
2518
2519
#define EL2_REG(name, acc, rst, v) \
2520
EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
2521
2522
#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
2523
#define EL2_REG_VNCR_FILT(name, vis) \
2524
EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
2525
#define EL2_REG_VNCR_GICv3(name) \
2526
EL2_REG_VNCR_FILT(name, hidden_visibility)
2527
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
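/*
* For illustration (a sketch of the expansion, not verbatim preprocessor
* output), EL2_REG(VBAR_EL2, access_rw, reset_val, 0) is equivalent to:
*
*   {
*           SYS_DESC(SYS_VBAR_EL2),
*           .access     = access_rw,
*           .reset      = reset_val,
*           .reg        = VBAR_EL2,
*           .visibility = el2_visibility,
*           .val        = 0,
*   }
*
* The _VNCR, _VNCR_FILT and _GICv3 variants substitute bad_vncr_trap as the
* accessor and, for the latter two, a different visibility callback.
*/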
2528
2529
/*
2530
* The reset() callback and the val field are not otherwise used for idregs,
2531
* so they are repurposed here.
2532
* reset() returns the KVM-sanitised register value, which is the same as the
2533
* host kernel's sanitised value when KVM applies no additional sanitisation.
2534
* val is used as a mask of the idreg fields that are writable from userspace:
2535
* only bits set to 1 are writable. This mask might become
2536
* unnecessary in the future, once all ID registers are enabled as writable
2537
* from userspace.
2538
*/
2539
2540
#define ID_DESC_DEFAULT_CALLBACKS \
2541
.access = access_id_reg, \
2542
.get_user = get_id_reg, \
2543
.set_user = set_id_reg, \
2544
.visibility = id_visibility, \
2545
.reset = kvm_read_sanitised_id_reg
2546
2547
#define ID_DESC(name) \
2548
SYS_DESC(SYS_##name), \
2549
ID_DESC_DEFAULT_CALLBACKS
2550
2551
/* sys_reg_desc initialiser for known cpufeature ID registers */
2552
#define ID_SANITISED(name) { \
2553
ID_DESC(name), \
2554
.val = 0, \
2555
}
2556
2557
/* sys_reg_desc initialiser for known cpufeature ID registers */
2558
#define AA32_ID_SANITISED(name) { \
2559
ID_DESC(name), \
2560
.visibility = aa32_id_visibility, \
2561
.val = 0, \
2562
}
2563
2564
/* sys_reg_desc initialiser for writable ID registers */
2565
#define ID_WRITABLE(name, mask) { \
2566
ID_DESC(name), \
2567
.val = mask, \
2568
}
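/*
* For illustration (a sketch of the expansion, not verbatim preprocessor
* output), ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0) builds:
*
*   {
*           SYS_DESC(SYS_ID_AA64ISAR0_EL1),
*           .access     = access_id_reg,
*           .get_user   = get_id_reg,
*           .set_user   = set_id_reg,
*           .visibility = id_visibility,
*           .reset      = kvm_read_sanitised_id_reg,
*           .val        = ~ID_AA64ISAR0_EL1_RES0,
*   }
*
* with .val acting as the userspace-writable mask described above.
*/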
2569
2570
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
2571
#define ID_FILTERED(sysreg, name, mask) { \
2572
ID_DESC(sysreg), \
2573
.set_user = set_##name, \
2574
.val = (mask), \
2575
}
2576
2577
/*
2578
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2579
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2580
* (1 <= crm < 8, 0 <= Op2 < 8).
2581
*/
2582
#define ID_UNALLOCATED(crm, op2) { \
2583
.name = "S3_0_0_" #crm "_" #op2, \
2584
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
2585
ID_DESC_DEFAULT_CALLBACKS, \
2586
.visibility = raz_visibility, \
2587
.val = 0, \
2588
}
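/*
* For illustration, ID_UNALLOCATED(4, 3) describes the reserved ID space at
* Op0=3, Op1=0, CRn=0, CRm=4, Op2=3, named "S3_0_0_4_3", using the default
* ID register callbacks but with raz_visibility so that it reads as zero.
*/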
2589
2590
/*
2591
* sys_reg_desc initialiser for known ID registers that we hide from guests.
2592
* For now, these are exposed just like unallocated ID regs: they appear
2593
* RAZ for the guest.
2594
*/
2595
#define ID_HIDDEN(name) { \
2596
ID_DESC(name), \
2597
.visibility = raz_visibility, \
2598
.val = 0, \
2599
}
2600
2601
static bool access_sp_el1(struct kvm_vcpu *vcpu,
2602
struct sys_reg_params *p,
2603
const struct sys_reg_desc *r)
2604
{
2605
if (p->is_write)
2606
__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2607
else
2608
p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2609
2610
return true;
2611
}
2612
2613
static bool access_elr(struct kvm_vcpu *vcpu,
2614
struct sys_reg_params *p,
2615
const struct sys_reg_desc *r)
2616
{
2617
if (p->is_write)
2618
vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2619
else
2620
p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2621
2622
return true;
2623
}
2624
2625
static bool access_spsr(struct kvm_vcpu *vcpu,
2626
struct sys_reg_params *p,
2627
const struct sys_reg_desc *r)
2628
{
2629
if (p->is_write)
2630
__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2631
else
2632
p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2633
2634
return true;
2635
}
2636
2637
static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2638
struct sys_reg_params *p,
2639
const struct sys_reg_desc *r)
2640
{
2641
if (p->is_write)
2642
__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2643
else
2644
p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2645
2646
return true;
2647
}
2648
2649
static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2650
{
2651
u64 val = r->val;
2652
2653
if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2654
val |= HCR_E2H;
2655
2656
__vcpu_assign_sys_reg(vcpu, r->reg, val);
2657
2658
return __vcpu_sys_reg(vcpu, r->reg);
2659
}
2660
2661
static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2662
const struct sys_reg_desc *rd,
2663
unsigned int (*fn)(const struct kvm_vcpu *,
2664
const struct sys_reg_desc *))
2665
{
2666
return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2667
}
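/*
* The wrappers below compose el2_visibility() with a feature-specific check:
* a register is exposed only if both checks return 0, and a REG_HIDDEN
* result from either one wins. For example, sve_el2_visibility() hides a
* register unless the vCPU has both NV and SVE.
*/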
2668
2669
static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2670
const struct sys_reg_desc *rd)
2671
{
2672
return __el2_visibility(vcpu, rd, sve_visibility);
2673
}
2674
2675
static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2676
const struct sys_reg_desc *rd)
2677
{
2678
if (el2_visibility(vcpu, rd) == 0 &&
2679
kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2680
return 0;
2681
2682
return REG_HIDDEN;
2683
}
2684
2685
static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
2686
const struct sys_reg_desc *rd)
2687
{
2688
if (kvm_has_sctlr2(vcpu->kvm))
2689
return 0;
2690
2691
return REG_HIDDEN;
2692
}
2693
2694
static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
2695
const struct sys_reg_desc *rd)
2696
{
2697
return __el2_visibility(vcpu, rd, sctlr2_visibility);
2698
}
2699
2700
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2701
struct sys_reg_params *p,
2702
const struct sys_reg_desc *r)
2703
{
2704
unsigned int vq;
2705
2706
if (guest_hyp_sve_traps_enabled(vcpu)) {
2707
kvm_inject_nested_sve_trap(vcpu);
2708
return true;
2709
}
2710
2711
if (!p->is_write) {
2712
p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
2713
return true;
2714
}
2715
2716
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2717
vq = min(vq, vcpu_sve_max_vq(vcpu));
2718
vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
2719
2720
return true;
2721
}
2722
2723
static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2724
struct sys_reg_params *p,
2725
const struct sys_reg_desc *r)
2726
{
2727
if (p->is_write)
2728
return write_to_read_only(vcpu, p, r);
2729
2730
p->regval = kvm_get_guest_vtr_el2();
2731
2732
return true;
2733
}
2734
2735
static bool access_gic_misr(struct kvm_vcpu *vcpu,
2736
struct sys_reg_params *p,
2737
const struct sys_reg_desc *r)
2738
{
2739
if (p->is_write)
2740
return write_to_read_only(vcpu, p, r);
2741
2742
p->regval = vgic_v3_get_misr(vcpu);
2743
2744
return true;
2745
}
2746
2747
static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2748
struct sys_reg_params *p,
2749
const struct sys_reg_desc *r)
2750
{
2751
if (p->is_write)
2752
return write_to_read_only(vcpu, p, r);
2753
2754
p->regval = vgic_v3_get_eisr(vcpu);
2755
2756
return true;
2757
}
2758
2759
static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2760
struct sys_reg_params *p,
2761
const struct sys_reg_desc *r)
2762
{
2763
if (p->is_write)
2764
return write_to_read_only(vcpu, p, r);
2765
2766
p->regval = vgic_v3_get_elrsr(vcpu);
2767
2768
return true;
2769
}
2770
2771
static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2772
const struct sys_reg_desc *rd)
2773
{
2774
if (kvm_has_s1poe(vcpu->kvm))
2775
return 0;
2776
2777
return REG_HIDDEN;
2778
}
2779
2780
static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2781
const struct sys_reg_desc *rd)
2782
{
2783
return __el2_visibility(vcpu, rd, s1poe_visibility);
2784
}
2785
2786
static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2787
const struct sys_reg_desc *rd)
2788
{
2789
if (kvm_has_tcr2(vcpu->kvm))
2790
return 0;
2791
2792
return REG_HIDDEN;
2793
}
2794
2795
static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2796
const struct sys_reg_desc *rd)
2797
{
2798
return __el2_visibility(vcpu, rd, tcr2_visibility);
2799
}
2800
2801
static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
2802
const struct sys_reg_desc *rd)
2803
{
2804
if (el2_visibility(vcpu, rd) == 0 &&
2805
kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
2806
return 0;
2807
2808
return REG_HIDDEN;
2809
}
2810
2811
static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
2812
const struct sys_reg_desc *rd)
2813
{
2814
if (el2_visibility(vcpu, rd) == 0 &&
2815
kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
2816
return 0;
2817
2818
return REG_HIDDEN;
2819
}
2820
2821
static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2822
const struct sys_reg_desc *rd)
2823
{
2824
if (kvm_has_s1pie(vcpu->kvm))
2825
return 0;
2826
2827
return REG_HIDDEN;
2828
}
2829
2830
static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2831
const struct sys_reg_desc *rd)
2832
{
2833
return __el2_visibility(vcpu, rd, s1pie_visibility);
2834
}
2835
2836
static bool access_mdcr(struct kvm_vcpu *vcpu,
2837
struct sys_reg_params *p,
2838
const struct sys_reg_desc *r)
2839
{
2840
u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
2841
2842
if (!p->is_write) {
2843
p->regval = old;
2844
return true;
2845
}
2846
2847
val = p->regval;
2848
hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
2849
2850
/*
2851
* If HPMN is out of bounds, limit it to what we actually
2852
* support. This matches the UNKNOWN definition of the field
2853
* in that case, and keeps the emulation simple. Sort of.
2854
*/
2855
if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
2856
hpmn = vcpu->kvm->arch.nr_pmu_counters;
2857
u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
2858
}
2859
2860
__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
2861
2862
/*
2863
* Request a reload of the PMU to enable/disable the counters
2864
* affected by HPME.
2865
*/
2866
if ((old ^ val) & MDCR_EL2_HPME)
2867
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
2868
2869
return true;
2870
}
2871
2872
static bool access_ras(struct kvm_vcpu *vcpu,
2873
struct sys_reg_params *p,
2874
const struct sys_reg_desc *r)
2875
{
2876
struct kvm *kvm = vcpu->kvm;
2877
2878
switch(reg_to_encoding(r)) {
2879
case SYS_ERXPFGCDN_EL1:
2880
case SYS_ERXPFGCTL_EL1:
2881
case SYS_ERXPFGF_EL1:
2882
case SYS_ERXMISC2_EL1:
2883
case SYS_ERXMISC3_EL1:
2884
if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
2885
(kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
2886
kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
2887
kvm_inject_undefined(vcpu);
2888
return false;
2889
}
2890
break;
2891
default:
2892
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
2893
kvm_inject_undefined(vcpu);
2894
return false;
2895
}
2896
}
2897
2898
return trap_raz_wi(vcpu, p, r);
2899
}
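/*
* In short: the RAS registers UNDEF unless the VM advertises FEAT_RAS
* (FEAT_RASv1p1 for the ERXPFG* and ERXMISC{2,3} group), and are emulated
* as RAZ/WI via trap_raz_wi() when they are accessible.
*/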
2900
2901
/*
2902
* For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2903
* AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2904
* The values made visible to userspace were the register values of the boot
2905
* CPU.
2906
*
2907
* At the same time, reads from these registers at EL1 previously were not
2908
* trapped, allowing the guest to read the actual hardware value. On big-little
2909
* machines, this means the VM can see different values depending on where a
2910
* given vCPU got scheduled.
2911
*
2912
* These registers are now trapped as collateral damage from SME, and what
2913
* follows attempts to give a user / guest view consistent with the existing
2914
* ABI.
2915
*/
2916
static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
2917
struct sys_reg_params *p,
2918
const struct sys_reg_desc *r)
2919
{
2920
if (p->is_write)
2921
return write_to_read_only(vcpu, p, r);
2922
2923
/*
2924
* Return the VM-scoped implementation ID register values if userspace
2925
* has made them writable.
2926
*/
2927
if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
2928
return access_id_reg(vcpu, p, r);
2929
2930
/*
2931
* Otherwise, fall back to the old behavior of returning the value of
2932
* the current CPU.
2933
*/
2934
switch (reg_to_encoding(r)) {
2935
case SYS_REVIDR_EL1:
2936
p->regval = read_sysreg(revidr_el1);
2937
break;
2938
case SYS_AIDR_EL1:
2939
p->regval = read_sysreg(aidr_el1);
2940
break;
2941
default:
2942
WARN_ON_ONCE(1);
2943
}
2944
2945
return true;
2946
}
2947
2948
static u64 __ro_after_init boot_cpu_midr_val;
2949
static u64 __ro_after_init boot_cpu_revidr_val;
2950
static u64 __ro_after_init boot_cpu_aidr_val;
2951
2952
static void init_imp_id_regs(void)
2953
{
2954
boot_cpu_midr_val = read_sysreg(midr_el1);
2955
boot_cpu_revidr_val = read_sysreg(revidr_el1);
2956
boot_cpu_aidr_val = read_sysreg(aidr_el1);
2957
}
2958
2959
static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2960
{
2961
switch (reg_to_encoding(r)) {
2962
case SYS_MIDR_EL1:
2963
return boot_cpu_midr_val;
2964
case SYS_REVIDR_EL1:
2965
return boot_cpu_revidr_val;
2966
case SYS_AIDR_EL1:
2967
return boot_cpu_aidr_val;
2968
default:
2969
KVM_BUG_ON(1, vcpu->kvm);
2970
return 0;
2971
}
2972
}
2973
2974
static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
2975
u64 val)
2976
{
2977
struct kvm *kvm = vcpu->kvm;
2978
u64 expected;
2979
2980
guard(mutex)(&kvm->arch.config_lock);
2981
2982
expected = read_id_reg(vcpu, r);
2983
if (expected == val)
2984
return 0;
2985
2986
if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
2987
return -EINVAL;
2988
2989
/*
2990
* Once the VM has started the ID registers are immutable. Reject the
2991
* write if userspace tries to change it.
2992
*/
2993
if (kvm_vm_has_ran_once(kvm))
2994
return -EBUSY;
2995
2996
/*
2997
* Any value is allowed for the implementation ID registers so long as
2998
* it is within the writable mask.
2999
*/
3000
if ((val & r->val) != val)
3001
return -EINVAL;
3002
3003
kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
3004
return 0;
3005
}
3006
3007
#define IMPLEMENTATION_ID(reg, mask) { \
3008
SYS_DESC(SYS_##reg), \
3009
.access = access_imp_id_reg, \
3010
.get_user = get_id_reg, \
3011
.set_user = set_imp_id_reg, \
3012
.reset = reset_imp_id_reg, \
3013
.val = mask, \
3014
}
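/*
* For illustration (a sketch of the expansion, not verbatim preprocessor
* output), IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)) yields:
*
*   {
*           SYS_DESC(SYS_MIDR_EL1),
*           .access   = access_imp_id_reg,
*           .get_user = get_id_reg,
*           .set_user = set_imp_id_reg,
*           .reset    = reset_imp_id_reg,
*           .val      = GENMASK_ULL(31, 0),
*   }
*
* i.e. only the low 32 bits of MIDR_EL1 are writable from userspace, and the
* register resets to the boot CPU's value.
*/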
3015
3016
static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3017
{
3018
__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
3019
return vcpu->kvm->arch.nr_pmu_counters;
3020
}
3021
3022
/*
3023
* Architected system registers.
3024
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
3025
*
3026
* Debug handling: We trap most, if not all, debug-related system
3027
* registers. The implementation is good enough to ensure that a guest
3028
* can use these with minimal performance degradation. The drawback is
3029
* that we don't implement any of the external debug architecture.
3030
* This should be revisited if we ever encounter a more demanding
3031
* guest...
3032
*/
3033
static const struct sys_reg_desc sys_reg_descs[] = {
3034
DBG_BCR_BVR_WCR_WVR_EL1(0),
3035
DBG_BCR_BVR_WCR_WVR_EL1(1),
3036
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
3037
{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
3038
DBG_BCR_BVR_WCR_WVR_EL1(2),
3039
DBG_BCR_BVR_WCR_WVR_EL1(3),
3040
DBG_BCR_BVR_WCR_WVR_EL1(4),
3041
DBG_BCR_BVR_WCR_WVR_EL1(5),
3042
DBG_BCR_BVR_WCR_WVR_EL1(6),
3043
DBG_BCR_BVR_WCR_WVR_EL1(7),
3044
DBG_BCR_BVR_WCR_WVR_EL1(8),
3045
DBG_BCR_BVR_WCR_WVR_EL1(9),
3046
DBG_BCR_BVR_WCR_WVR_EL1(10),
3047
DBG_BCR_BVR_WCR_WVR_EL1(11),
3048
DBG_BCR_BVR_WCR_WVR_EL1(12),
3049
DBG_BCR_BVR_WCR_WVR_EL1(13),
3050
DBG_BCR_BVR_WCR_WVR_EL1(14),
3051
DBG_BCR_BVR_WCR_WVR_EL1(15),
3052
3053
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3054
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3055
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3056
OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3057
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3058
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3059
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3060
{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3061
{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3062
3063
{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3064
{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3065
// DBGDTR[TR]X_EL0 share the same encoding
3066
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3067
3068
{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3069
3070
IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3071
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3072
IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3073
3074
/*
3075
* ID regs: all ID_SANITISED() entries here must have corresponding
3076
* entries in arm64_ftr_regs[].
3077
*/
3078
3079
/* AArch64 mappings of the AArch32 ID registers */
3080
/* CRm=1 */
3081
AA32_ID_SANITISED(ID_PFR0_EL1),
3082
AA32_ID_SANITISED(ID_PFR1_EL1),
3083
{ SYS_DESC(SYS_ID_DFR0_EL1),
3084
.access = access_id_reg,
3085
.get_user = get_id_reg,
3086
.set_user = set_id_dfr0_el1,
3087
.visibility = aa32_id_visibility,
3088
.reset = read_sanitised_id_dfr0_el1,
3089
.val = ID_DFR0_EL1_PerfMon_MASK |
3090
ID_DFR0_EL1_CopDbg_MASK, },
3091
ID_HIDDEN(ID_AFR0_EL1),
3092
AA32_ID_SANITISED(ID_MMFR0_EL1),
3093
AA32_ID_SANITISED(ID_MMFR1_EL1),
3094
AA32_ID_SANITISED(ID_MMFR2_EL1),
3095
AA32_ID_SANITISED(ID_MMFR3_EL1),
3096
3097
/* CRm=2 */
3098
AA32_ID_SANITISED(ID_ISAR0_EL1),
3099
AA32_ID_SANITISED(ID_ISAR1_EL1),
3100
AA32_ID_SANITISED(ID_ISAR2_EL1),
3101
AA32_ID_SANITISED(ID_ISAR3_EL1),
3102
AA32_ID_SANITISED(ID_ISAR4_EL1),
3103
AA32_ID_SANITISED(ID_ISAR5_EL1),
3104
AA32_ID_SANITISED(ID_MMFR4_EL1),
3105
AA32_ID_SANITISED(ID_ISAR6_EL1),
3106
3107
/* CRm=3 */
3108
AA32_ID_SANITISED(MVFR0_EL1),
3109
AA32_ID_SANITISED(MVFR1_EL1),
3110
AA32_ID_SANITISED(MVFR2_EL1),
3111
ID_UNALLOCATED(3,3),
3112
AA32_ID_SANITISED(ID_PFR2_EL1),
3113
ID_HIDDEN(ID_DFR1_EL1),
3114
AA32_ID_SANITISED(ID_MMFR5_EL1),
3115
ID_UNALLOCATED(3,7),
3116
3117
/* AArch64 ID registers */
3118
/* CRm=4 */
3119
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3120
~(ID_AA64PFR0_EL1_AMU |
3121
ID_AA64PFR0_EL1_MPAM |
3122
ID_AA64PFR0_EL1_SVE |
3123
ID_AA64PFR0_EL1_AdvSIMD |
3124
ID_AA64PFR0_EL1_FP)),
3125
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3126
~(ID_AA64PFR1_EL1_PFAR |
3127
ID_AA64PFR1_EL1_MTEX |
3128
ID_AA64PFR1_EL1_THE |
3129
ID_AA64PFR1_EL1_GCS |
3130
ID_AA64PFR1_EL1_MTE_frac |
3131
ID_AA64PFR1_EL1_NMI |
3132
ID_AA64PFR1_EL1_RNDR_trap |
3133
ID_AA64PFR1_EL1_SME |
3134
ID_AA64PFR1_EL1_RES0 |
3135
ID_AA64PFR1_EL1_MPAM_frac |
3136
ID_AA64PFR1_EL1_MTE)),
3137
ID_WRITABLE(ID_AA64PFR2_EL1,
3138
ID_AA64PFR2_EL1_FPMR |
3139
ID_AA64PFR2_EL1_MTEFAR |
3140
ID_AA64PFR2_EL1_MTESTOREONLY),
3141
ID_UNALLOCATED(4,3),
3142
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3143
ID_HIDDEN(ID_AA64SMFR0_EL1),
3144
ID_UNALLOCATED(4,6),
3145
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3146
3147
/* CRm=5 */
3148
/*
3149
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
3150
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3151
* KVM does not trap + emulate the breakpoint registers, and as such
3152
* cannot support a layout that misaligns with the underlying hardware.
3153
* While it may be possible to describe a subset that aligns with
3154
* hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3155
* simplicity.
3156
*
3157
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3158
* of breakpoints for more details.
3159
*/
3160
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3161
ID_AA64DFR0_EL1_DoubleLock_MASK |
3162
ID_AA64DFR0_EL1_WRPs_MASK |
3163
ID_AA64DFR0_EL1_PMUVer_MASK |
3164
ID_AA64DFR0_EL1_DebugVer_MASK),
3165
ID_SANITISED(ID_AA64DFR1_EL1),
3166
ID_UNALLOCATED(5,2),
3167
ID_UNALLOCATED(5,3),
3168
ID_HIDDEN(ID_AA64AFR0_EL1),
3169
ID_HIDDEN(ID_AA64AFR1_EL1),
3170
ID_UNALLOCATED(5,6),
3171
ID_UNALLOCATED(5,7),
3172
3173
/* CRm=6 */
3174
ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3175
ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3176
ID_AA64ISAR1_EL1_GPA |
3177
ID_AA64ISAR1_EL1_API |
3178
ID_AA64ISAR1_EL1_APA)),
3179
ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3180
ID_AA64ISAR2_EL1_APA3 |
3181
ID_AA64ISAR2_EL1_GPA3)),
3182
ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3183
ID_AA64ISAR3_EL1_LSFE |
3184
ID_AA64ISAR3_EL1_FAMINMAX)),
3185
ID_UNALLOCATED(6,4),
3186
ID_UNALLOCATED(6,5),
3187
ID_UNALLOCATED(6,6),
3188
ID_UNALLOCATED(6,7),
3189
3190
/* CRm=7 */
3191
ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3192
~(ID_AA64MMFR0_EL1_RES0 |
3193
ID_AA64MMFR0_EL1_ASIDBITS)),
3194
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3195
ID_AA64MMFR1_EL1_XNX |
3196
ID_AA64MMFR1_EL1_VH |
3197
ID_AA64MMFR1_EL1_VMIDBits)),
3198
ID_FILTERED(ID_AA64MMFR2_EL1,
3199
id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3200
ID_AA64MMFR2_EL1_EVT |
3201
ID_AA64MMFR2_EL1_FWB |
3202
ID_AA64MMFR2_EL1_IDS |
3203
ID_AA64MMFR2_EL1_NV |
3204
ID_AA64MMFR2_EL1_CCIDX)),
3205
ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
3206
ID_AA64MMFR3_EL1_SCTLRX |
3207
ID_AA64MMFR3_EL1_S1PIE |
3208
ID_AA64MMFR3_EL1_S1POE)),
3209
ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3210
ID_UNALLOCATED(7,5),
3211
ID_UNALLOCATED(7,6),
3212
ID_UNALLOCATED(7,7),
3213
3214
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3215
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3216
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3217
{ SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3218
.visibility = sctlr2_visibility },
3219
3220
MTE_REG(RGSR_EL1),
3221
MTE_REG(GCR_EL1),
3222
3223
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3224
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
3225
{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
3226
{ SYS_DESC(SYS_SMCR_EL1), undef_access },
3227
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3228
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3229
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3230
{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3231
.visibility = tcr2_visibility },
3232
3233
PTRAUTH_KEY(APIA),
3234
PTRAUTH_KEY(APIB),
3235
PTRAUTH_KEY(APDA),
3236
PTRAUTH_KEY(APDB),
3237
PTRAUTH_KEY(APGA),
3238
3239
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
3240
{ SYS_DESC(SYS_ELR_EL1), access_elr},
3241
3242
{ SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3243
3244
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3245
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3246
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3247
3248
{ SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3249
{ SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3250
{ SYS_DESC(SYS_ERXFR_EL1), access_ras },
3251
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3252
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3253
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3254
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3255
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3256
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3257
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3258
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3259
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3260
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3261
3262
MTE_REG(TFSR_EL1),
3263
MTE_REG(TFSRE0_EL1),
3264
3265
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3266
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3267
3268
{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
3269
{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3270
{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
3271
{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3272
{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3273
{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3274
{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3275
{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3276
{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3277
{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3278
{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
3279
{ SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
3280
/* PMBIDR_EL1 is not trapped */
3281
3282
{ PMU_SYS_REG(PMINTENSET_EL1),
3283
.access = access_pminten, .reg = PMINTENSET_EL1,
3284
.get_user = get_pmreg, .set_user = set_pmreg },
3285
{ PMU_SYS_REG(PMINTENCLR_EL1),
3286
.access = access_pminten, .reg = PMINTENSET_EL1,
3287
.get_user = get_pmreg, .set_user = set_pmreg },
3288
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3289
3290
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3291
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3292
.visibility = s1pie_visibility },
3293
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3294
.visibility = s1pie_visibility },
3295
{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3296
.visibility = s1poe_visibility },
3297
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3298
3299
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3300
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3301
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
3302
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
3303
{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3304
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
3305
3306
{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
3307
{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
3308
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3309
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3310
3311
{ SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3312
{ SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3313
{ SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3314
{ SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3315
{ SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3316
{ SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3317
{ SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3318
{ SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3319
{ SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3320
{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3321
{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3322
{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3323
{ SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
3324
{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3325
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3326
{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3327
{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3328
{ SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3329
{ SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3330
{ SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3331
{ SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3332
{ SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3333
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3334
{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3335
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3336
3337
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3338
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3339
3340
{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3341
3342
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3343
3344
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3345
3346
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3347
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3348
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3349
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
3350
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
3351
IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3352
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3353
ID_FILTERED(CTR_EL0, ctr_el0,
3354
CTR_EL0_DIC_MASK |
3355
CTR_EL0_IDC_MASK |
3356
CTR_EL0_DminLine_MASK |
3357
CTR_EL0_L1Ip_MASK |
3358
CTR_EL0_IminLine_MASK),
3359
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
3360
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3361
3362
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3363
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3364
{ PMU_SYS_REG(PMCNTENSET_EL0),
3365
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3366
.get_user = get_pmreg, .set_user = set_pmreg },
3367
{ PMU_SYS_REG(PMCNTENCLR_EL0),
3368
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3369
.get_user = get_pmreg, .set_user = set_pmreg },
3370
{ PMU_SYS_REG(PMOVSCLR_EL0),
3371
.access = access_pmovs, .reg = PMOVSSET_EL0,
3372
.get_user = get_pmreg, .set_user = set_pmreg },
3373
/*
3374
* PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3375
* (pointlessly) advertised in the past...
3376
*/
3377
{ PMU_SYS_REG(PMSWINC_EL0),
3378
.get_user = get_raz_reg, .set_user = set_wi_reg,
3379
.access = access_pmswinc, .reset = NULL },
3380
{ PMU_SYS_REG(PMSELR_EL0),
3381
.access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3382
{ PMU_SYS_REG(PMCEID0_EL0),
3383
.access = access_pmceid, .reset = NULL },
3384
{ PMU_SYS_REG(PMCEID1_EL0),
3385
.access = access_pmceid, .reset = NULL },
3386
{ PMU_SYS_REG(PMCCNTR_EL0),
3387
.access = access_pmu_evcntr, .reset = reset_unknown,
3388
.reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3389
.set_user = set_pmu_evcntr },
3390
{ PMU_SYS_REG(PMXEVTYPER_EL0),
3391
.access = access_pmu_evtyper, .reset = NULL },
3392
{ PMU_SYS_REG(PMXEVCNTR_EL0),
3393
.access = access_pmu_evcntr, .reset = NULL },
3394
/*
3395
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3396
* in 32bit mode. Here we choose to reset it as zero for consistency.
3397
*/
3398
{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3399
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3400
{ PMU_SYS_REG(PMOVSSET_EL0),
3401
.access = access_pmovs, .reg = PMOVSSET_EL0,
3402
.get_user = get_pmreg, .set_user = set_pmreg },
3403
3404
{ SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3405
.visibility = s1poe_visibility },
3406
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3407
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3408
{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3409
3410
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3411
3412
{ SYS_DESC(SYS_AMCR_EL0), undef_access },
3413
{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3414
{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3415
{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3416
{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3417
{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3418
{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3419
{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3420
AMU_AMEVCNTR0_EL0(0),
3421
AMU_AMEVCNTR0_EL0(1),
3422
AMU_AMEVCNTR0_EL0(2),
3423
AMU_AMEVCNTR0_EL0(3),
3424
AMU_AMEVCNTR0_EL0(4),
3425
AMU_AMEVCNTR0_EL0(5),
3426
AMU_AMEVCNTR0_EL0(6),
3427
AMU_AMEVCNTR0_EL0(7),
3428
AMU_AMEVCNTR0_EL0(8),
3429
AMU_AMEVCNTR0_EL0(9),
3430
AMU_AMEVCNTR0_EL0(10),
3431
AMU_AMEVCNTR0_EL0(11),
3432
AMU_AMEVCNTR0_EL0(12),
3433
AMU_AMEVCNTR0_EL0(13),
3434
AMU_AMEVCNTR0_EL0(14),
3435
AMU_AMEVCNTR0_EL0(15),
3436
AMU_AMEVTYPER0_EL0(0),
3437
AMU_AMEVTYPER0_EL0(1),
3438
AMU_AMEVTYPER0_EL0(2),
3439
AMU_AMEVTYPER0_EL0(3),
3440
AMU_AMEVTYPER0_EL0(4),
3441
AMU_AMEVTYPER0_EL0(5),
3442
AMU_AMEVTYPER0_EL0(6),
3443
AMU_AMEVTYPER0_EL0(7),
3444
AMU_AMEVTYPER0_EL0(8),
3445
AMU_AMEVTYPER0_EL0(9),
3446
AMU_AMEVTYPER0_EL0(10),
3447
AMU_AMEVTYPER0_EL0(11),
3448
AMU_AMEVTYPER0_EL0(12),
3449
AMU_AMEVTYPER0_EL0(13),
3450
AMU_AMEVTYPER0_EL0(14),
3451
AMU_AMEVTYPER0_EL0(15),
3452
AMU_AMEVCNTR1_EL0(0),
3453
AMU_AMEVCNTR1_EL0(1),
3454
AMU_AMEVCNTR1_EL0(2),
3455
AMU_AMEVCNTR1_EL0(3),
3456
AMU_AMEVCNTR1_EL0(4),
3457
AMU_AMEVCNTR1_EL0(5),
3458
AMU_AMEVCNTR1_EL0(6),
3459
AMU_AMEVCNTR1_EL0(7),
3460
AMU_AMEVCNTR1_EL0(8),
3461
AMU_AMEVCNTR1_EL0(9),
3462
AMU_AMEVCNTR1_EL0(10),
3463
AMU_AMEVCNTR1_EL0(11),
3464
AMU_AMEVCNTR1_EL0(12),
3465
AMU_AMEVCNTR1_EL0(13),
3466
AMU_AMEVCNTR1_EL0(14),
3467
AMU_AMEVCNTR1_EL0(15),
3468
AMU_AMEVTYPER1_EL0(0),
3469
AMU_AMEVTYPER1_EL0(1),
3470
AMU_AMEVTYPER1_EL0(2),
3471
AMU_AMEVTYPER1_EL0(3),
3472
AMU_AMEVTYPER1_EL0(4),
3473
AMU_AMEVTYPER1_EL0(5),
3474
AMU_AMEVTYPER1_EL0(6),
3475
AMU_AMEVTYPER1_EL0(7),
3476
AMU_AMEVTYPER1_EL0(8),
3477
AMU_AMEVTYPER1_EL0(9),
3478
AMU_AMEVTYPER1_EL0(10),
3479
AMU_AMEVTYPER1_EL0(11),
3480
AMU_AMEVTYPER1_EL0(12),
3481
AMU_AMEVTYPER1_EL0(13),
3482
AMU_AMEVTYPER1_EL0(14),
3483
AMU_AMEVTYPER1_EL0(15),
3484
3485
{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
3486
{ SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
3487
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3488
{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3489
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3490
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
3491
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
3492
3493
{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3494
{ SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
3495
{ SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
3496
3497
/* PMEVCNTRn_EL0 */
3498
PMU_PMEVCNTR_EL0(0),
3499
PMU_PMEVCNTR_EL0(1),
3500
PMU_PMEVCNTR_EL0(2),
3501
PMU_PMEVCNTR_EL0(3),
3502
PMU_PMEVCNTR_EL0(4),
3503
PMU_PMEVCNTR_EL0(5),
3504
PMU_PMEVCNTR_EL0(6),
3505
PMU_PMEVCNTR_EL0(7),
3506
PMU_PMEVCNTR_EL0(8),
3507
PMU_PMEVCNTR_EL0(9),
3508
PMU_PMEVCNTR_EL0(10),
3509
PMU_PMEVCNTR_EL0(11),
3510
PMU_PMEVCNTR_EL0(12),
3511
PMU_PMEVCNTR_EL0(13),
3512
PMU_PMEVCNTR_EL0(14),
3513
PMU_PMEVCNTR_EL0(15),
3514
PMU_PMEVCNTR_EL0(16),
3515
PMU_PMEVCNTR_EL0(17),
3516
PMU_PMEVCNTR_EL0(18),
3517
PMU_PMEVCNTR_EL0(19),
3518
PMU_PMEVCNTR_EL0(20),
3519
PMU_PMEVCNTR_EL0(21),
3520
PMU_PMEVCNTR_EL0(22),
3521
PMU_PMEVCNTR_EL0(23),
3522
PMU_PMEVCNTR_EL0(24),
3523
PMU_PMEVCNTR_EL0(25),
3524
PMU_PMEVCNTR_EL0(26),
3525
PMU_PMEVCNTR_EL0(27),
3526
PMU_PMEVCNTR_EL0(28),
3527
PMU_PMEVCNTR_EL0(29),
3528
PMU_PMEVCNTR_EL0(30),
3529
/* PMEVTYPERn_EL0 */
3530
PMU_PMEVTYPER_EL0(0),
3531
PMU_PMEVTYPER_EL0(1),
3532
PMU_PMEVTYPER_EL0(2),
3533
PMU_PMEVTYPER_EL0(3),
3534
PMU_PMEVTYPER_EL0(4),
3535
PMU_PMEVTYPER_EL0(5),
3536
PMU_PMEVTYPER_EL0(6),
3537
PMU_PMEVTYPER_EL0(7),
3538
PMU_PMEVTYPER_EL0(8),
3539
PMU_PMEVTYPER_EL0(9),
3540
PMU_PMEVTYPER_EL0(10),
3541
PMU_PMEVTYPER_EL0(11),
3542
PMU_PMEVTYPER_EL0(12),
3543
PMU_PMEVTYPER_EL0(13),
3544
PMU_PMEVTYPER_EL0(14),
3545
PMU_PMEVTYPER_EL0(15),
3546
PMU_PMEVTYPER_EL0(16),
3547
PMU_PMEVTYPER_EL0(17),
3548
PMU_PMEVTYPER_EL0(18),
3549
PMU_PMEVTYPER_EL0(19),
3550
PMU_PMEVTYPER_EL0(20),
3551
PMU_PMEVTYPER_EL0(21),
3552
PMU_PMEVTYPER_EL0(22),
3553
PMU_PMEVTYPER_EL0(23),
3554
PMU_PMEVTYPER_EL0(24),
3555
PMU_PMEVTYPER_EL0(25),
3556
PMU_PMEVTYPER_EL0(26),
3557
PMU_PMEVTYPER_EL0(27),
3558
PMU_PMEVTYPER_EL0(28),
3559
PMU_PMEVTYPER_EL0(29),
3560
PMU_PMEVTYPER_EL0(30),
3561
/*
3562
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3563
* in 32bit mode. Here we choose to reset it as zero for consistency.
3564
*/
3565
{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3566
.reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3567
3568
EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3569
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3570
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3571
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3572
EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3573
sctlr2_el2_visibility),
3574
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3575
EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3576
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3577
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3578
EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3579
EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3580
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3581
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3582
3583
EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3584
sve_el2_visibility),
3585
3586
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3587
3588
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3589
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3590
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3591
EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3592
tcr2_el2_visibility),
3593
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3594
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3595
EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3596
vncr_el2_visibility),
3597
3598
{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3599
EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3600
EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3601
EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3602
EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3603
EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3604
EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3605
EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3606
EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3607
EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3608
EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3609
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
3610
3611
/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3612
{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3613
{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3614
{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3615
{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3616
3617
{ SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3618
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3619
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3620
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3621
EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3622
{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3623
3624
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3625
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3626
3627
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3628
EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3629
s1pie_el2_visibility),
3630
EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3631
s1pie_el2_visibility),
3632
EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3633
s1poe_el2_visibility),
3634
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3635
{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3636
{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3637
{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
3638
{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3639
{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3640
{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3641
{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3642
{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3643
{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3644
{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3645
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3646
3647
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3648
{ SYS_DESC(SYS_RVBAR_EL2), undef_access },
3649
{ SYS_DESC(SYS_RMR_EL2), undef_access },
3650
EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3651
3652
EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3653
EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3654
EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3655
EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3656
EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3657
EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3658
EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3659
EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3660
3661
{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3662
3663
EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3664
{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3665
{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3666
{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3667
{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3668
EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3669
3670
EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3671
EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3672
EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3673
EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3674
EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3675
EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3676
EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3677
EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3678
EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3679
EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3680
EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3681
EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3682
EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3683
EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3684
EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3685
EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3686
3687
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3688
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3689
3690
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3691
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3692
{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3693
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
3694
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
3695
3696
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
3697
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
3698
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
3699
3700
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3701
3702
{ SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3703
{ SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3704
{ SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3705
3706
{ SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3707
{ SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3708
{ SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3709
3710
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3711
};
3712
3713
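/*
* Handle the trapped AT S1E0{R,W} and AT S1E1{R,W,RP,WP} instructions:
* forward the full instruction encoding and the Xt value (the VA) to
* the __kvm_at_s1e01() helper, which performs the translation on the
* guest's behalf.
*/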
static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3714
const struct sys_reg_desc *r)
3715
{
3716
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3717
3718
__kvm_at_s1e01(vcpu, op, p->regval);
3719
3720
return true;
3721
}
3722
3723
static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3724
const struct sys_reg_desc *r)
3725
{
3726
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3727
3728
/* There is no FGT associated with AT S1E2A :-( */
3729
if (op == OP_AT_S1E2A &&
3730
!kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3731
kvm_inject_undefined(vcpu);
3732
return false;
3733
}
3734
3735
__kvm_at_s1e2(vcpu, op, p->regval);
3736
3737
return true;
3738
}
3739
3740
static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3741
const struct sys_reg_desc *r)
3742
{
3743
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3744
3745
__kvm_at_s12(vcpu, op, p->regval);
3746
3747
return true;
3748
}
3749
3750
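/*
* Sanity-check a trapped stage 1+2 TLBI against the guest's view of the
* ID registers: reject the nXS forms when FEAT_XS isn't advertised and
* the outer-shareable forms when FEAT_TLBIOS isn't.
*/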
static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
{
struct kvm *kvm = vcpu->kvm;
3753
u8 CRm = sys_reg_CRm(instr);
3754
3755
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3756
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3757
return false;
3758
3759
if (CRm == TLBI_CRm_nROS &&
3760
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3761
return false;
3762
3763
return true;
3764
}
3765
3766
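/*
* TLBI ALLE1{,IS,OS,...}: drop all shadow stage-2 MMUs so that no stale
* combined S1/S2 translation can outlive the guest's invalidation.
*/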
static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3767
const struct sys_reg_desc *r)
3768
{
3769
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3770
3771
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3772
return undef_access(vcpu, p, r);
3773
3774
write_lock(&vcpu->kvm->mmu_lock);
3775
3776
/*
3777
* Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3778
* corresponding VMIDs.
3779
*/
3780
kvm_nested_s2_unmap(vcpu->kvm, true);
3781
3782
write_unlock(&vcpu->kvm->mmu_lock);
3783
3784
return true;
3785
}
3786
3787
static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
{
struct kvm *kvm = vcpu->kvm;
3790
u8 CRm = sys_reg_CRm(instr);
3791
u8 Op2 = sys_reg_Op2(instr);
3792
3793
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3794
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3795
return false;
3796
3797
if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3798
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3799
return false;
3800
3801
if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3802
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3803
return false;
3804
3805
if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3806
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3807
return false;
3808
3809
return true;
3810
}
3811
3812
/* Only defined here as this is an internal "abstraction" */
3813
union tlbi_info {
3814
struct {
3815
u64 start;
3816
u64 size;
3817
} range;
3818
3819
struct {
3820
u64 addr;
3821
} ipa;
3822
3823
struct {
3824
u64 addr;
3825
u32 encoding;
3826
} va;
3827
};
3828
3829
static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
3830
const union tlbi_info *info)
3831
{
3832
/*
* The unmap operation is allowed to drop the MMU lock and block, which
* means that @mmu could be used for a different context than the one
* currently being invalidated.
*
* This behavior is still safe, as:
*
* 1) The vCPU(s) that recycled the MMU are responsible for invalidating
* the entire MMU before reusing it, which still honors the intent
* of a TLBI.
*
* 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
* and ERET to the guest), other vCPUs are allowed to use stale
* translations.
*
* 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
* at worst may cause more aborts for shadow stage-2 fills.
*
* Dropping the MMU lock also implies that shadow stage-2 fills could
* happen behind the back of the TLBI. This is still safe, though, as
* the L1 needs to put its stage-2 in a consistent state before doing
* the TLBI.
*/
3855
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
3856
}
3857
3858
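/*
* TLBI VMALLS12E1{IS,OS,...}: unmap the shadow stage-2 matching the VMID
* currently held in the guest's VTTBR_EL2, over the whole IPA range
* allowed by the VM's PA size.
*/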
static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3859
const struct sys_reg_desc *r)
3860
{
3861
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3862
u64 limit, vttbr;
3863
3864
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3865
return undef_access(vcpu, p, r);
3866
3867
vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3868
limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3869
3870
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3871
&(union tlbi_info) {
3872
.range = {
3873
.start = 0,
3874
.size = limit,
3875
},
3876
},
3877
s2_mmu_unmap_range);
3878
3879
return true;
3880
}
3881
3882
static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3883
const struct sys_reg_desc *r)
3884
{
3885
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3886
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3887
u64 base, range;
3888
3889
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3890
return undef_access(vcpu, p, r);
3891
3892
/*
* Because the shadow S2 structure doesn't necessarily reflect that
* of the guest's S2 (different base granule size, for example), we
* decide to ignore TTL and only use the described range.
*/
3897
base = decode_range_tlbi(p->regval, &range, NULL);
3898
3899
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3900
&(union tlbi_info) {
3901
.range = {
3902
.start = base,
3903
.size = range,
3904
},
3905
},
3906
s2_mmu_unmap_range);
3907
3908
return true;
3909
}
3910
3911
static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3912
const union tlbi_info *info)
3913
{
3914
unsigned long max_size;
3915
u64 base_addr;
3916
3917
/*
* We drop a number of things from the supplied value:
*
* - NS bit: we're non-secure only.
*
* - IPA[51:48]: We don't support 52bit IPA just yet...
*
* And of course, adjust the IPA to be on an actual address.
*/
3926
base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
3927
max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
3928
base_addr &= ~(max_size - 1);
3929
3930
/*
3931
* See comment in s2_mmu_unmap_range() for why this is allowed to
3932
* reschedule.
3933
*/
3934
kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
3935
}
3936
3937
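/*
* TLBI IPAS2{,L}E1*: invalidate the shadow stage-2 translation backing
* the IPA supplied in Xt, for the VMID held in the guest's VTTBR_EL2.
*/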
static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3938
const struct sys_reg_desc *r)
3939
{
3940
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3941
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3942
3943
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3944
return undef_access(vcpu, p, r);
3945
3946
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3947
&(union tlbi_info) {
3948
.ipa = {
3949
.addr = p->regval,
3950
},
3951
},
3952
s2_mmu_unmap_ipa);
3953
3954
return true;
3955
}
3956
3957
static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
3958
const union tlbi_info *info)
3959
{
3960
WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
3961
}
3962
3963
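/*
* EL2 TLBIs issued by the virtual EL2: once validated against the
* guest's feature set, hand them to kvm_handle_s1e2_tlbi() for
* emulation.
*/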
static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3964
const struct sys_reg_desc *r)
3965
{
3966
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3967
3968
if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
3969
return undef_access(vcpu, p, r);
3970
3971
kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
3972
return true;
3973
}
3974
3975
static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3976
const struct sys_reg_desc *r)
3977
{
3978
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3979
3980
/*
* If we're here, this is because we've trapped on an EL1 TLBI
* instruction that affects the EL1 translation regime while
* we're running in a context that doesn't allow us to let the
* HW do its thing (aka vEL2):
*
* - HCR_EL2.E2H == 0 : a non-VHE guest
* - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
*
* Another possibility is that we are invalidating the EL2 context
* using EL1 instructions, but that we landed here because we need
* additional invalidation for structures that are not held in the
* CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
* that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
* as we don't allow an NV-capable L1 in a nVHE configuration.
*
* We don't expect these helpers to ever be called when running
* in a vEL1 context.
*/
3999
4000
WARN_ON(!vcpu_is_el2(vcpu));
4001
4002
if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
4003
return undef_access(vcpu, p, r);
4004
4005
if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
4006
kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4007
return true;
4008
}
4009
4010
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
4011
get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
4012
&(union tlbi_info) {
4013
.va = {
4014
.addr = p->regval,
4015
.encoding = sys_encoding,
4016
},
4017
},
4018
s2_mmu_tlbi_s1e1);
4019
4020
return true;
4021
}
4022
4023
#define SYS_INSN(insn, access_fn) \
4024
{ \
4025
SYS_DESC(OP_##insn), \
4026
.access = (access_fn), \
4027
}
4028
4029
static struct sys_reg_desc sys_insn_descs[] = {
4030
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
4031
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
4032
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
4033
4034
SYS_INSN(AT_S1E1R, handle_at_s1e01),
4035
SYS_INSN(AT_S1E1W, handle_at_s1e01),
4036
SYS_INSN(AT_S1E0R, handle_at_s1e01),
4037
SYS_INSN(AT_S1E0W, handle_at_s1e01),
4038
SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4039
SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4040
4041
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
4042
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4043
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4044
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
4045
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4046
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4047
4048
SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4049
SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4050
SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4051
SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4052
SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4053
SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4054
4055
SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4056
SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4057
SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4058
SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4059
4060
SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4061
SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4062
SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4063
SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4064
SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4065
SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4066
4067
SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4068
SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4069
SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4070
SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4071
4072
SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4073
SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4074
SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4075
SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4076
4077
SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4078
SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4079
SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4080
SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4081
SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4082
SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4083
4084
SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4085
SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4086
SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4087
SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4088
SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4089
SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4090
4091
SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4092
SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4093
SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4094
SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4095
4096
SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4097
SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4098
SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4099
SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4100
SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4101
SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4102
4103
SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4104
SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4105
SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4106
SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4107
4108
SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4109
SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4110
SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4111
SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4112
4113
SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4114
SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4115
SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4116
SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4117
SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4118
SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4119
4120
SYS_INSN(AT_S1E2R, handle_at_s1e2),
4121
SYS_INSN(AT_S1E2W, handle_at_s1e2),
4122
SYS_INSN(AT_S12E1R, handle_at_s12),
4123
SYS_INSN(AT_S12E1W, handle_at_s12),
4124
SYS_INSN(AT_S12E0R, handle_at_s12),
4125
SYS_INSN(AT_S12E0W, handle_at_s12),
4126
SYS_INSN(AT_S1E2A, handle_at_s1e2),
4127
4128
SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4129
SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4130
SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4131
SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4132
4133
SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4134
SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4135
SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4136
SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4137
SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4138
4139
SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4140
SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4141
SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4142
SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4143
4144
SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4145
4146
SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4147
4148
SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4149
SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4150
SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4151
SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4152
SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4153
SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4154
SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4155
SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4156
SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4157
SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4158
SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4159
SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4160
SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4161
SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4162
SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4163
4164
SYS_INSN(TLBI_ALLE1, handle_alle1is),
4165
4166
SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4167
4168
SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4169
4170
SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4171
SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4172
SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4173
SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4174
4175
SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4176
SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4177
SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4178
SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4179
SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4180
4181
SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4182
SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4183
SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4184
SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4185
4186
SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4187
SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4188
SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4189
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4190
SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4191
SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4192
SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4193
SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4194
SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4195
SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4196
SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4197
SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4198
SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4199
SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4200
SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4201
SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4202
SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4203
SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4204
SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4205
SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4206
};
4207
4208
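/*
* Emulate the AArch32 DBGDIDR from the guest's ID_AA64DFR0_EL1 view:
* breakpoint/watchpoint/context-compare counts and the debug version,
* plus a couple of fixed and EL3-dependent flag bits. Writes are
* ignored.
*/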
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4209
struct sys_reg_params *p,
4210
const struct sys_reg_desc *r)
4211
{
4212
if (p->is_write) {
4213
return ignore_write(vcpu, p);
4214
} else {
4215
u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4216
u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4217
4218
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4219
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4220
(SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4221
(SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4222
(1 << 15) | (el3 << 14) | (el3 << 12));
4223
return true;
4224
}
4225
}
4226
4227
/*
* AArch32 debug register mappings
*
* AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
* AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
*
* None of the other registers share their location, so treat them as
* if they were 64bit.
*/
4236
#define DBG_BCR_BVR_WCR_WVR(n) \
4237
/* DBGBVRn */ \
4238
{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
4239
trap_dbg_wb_reg, NULL, n }, \
4240
/* DBGBCRn */ \
4241
{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
4242
/* DBGWVRn */ \
4243
{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
4244
/* DBGWCRn */ \
4245
{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4246
4247
#define DBGBXVR(n) \
4248
{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
4249
trap_dbg_wb_reg, NULL, n }
4250
4251
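/*
* As an illustration, DBG_BCR_BVR_WCR_WVR(2) describes DBGBVR2
* (p14, 0, c0, c2, 4), DBGBCR2 (op2 5), DBGWVR2 (op2 6) and DBGWCR2
* (op2 7), all handled by trap_dbg_wb_reg with index 2, while
* DBGBXVR(2) (p14, 0, c1, c2, 1) maps to DBGBVR2_EL1[63:32].
*/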
/*
* Trapped cp14 registers. We generally ignore most of the external
* debug, on the principle that they don't really make sense to a
* guest. Revisit this one day, should this principle change.
*/
4256
static const struct sys_reg_desc cp14_regs[] = {
4257
/* DBGDIDR */
4258
{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4259
/* DBGDTRRXext */
4260
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4261
4262
DBG_BCR_BVR_WCR_WVR(0),
4263
/* DBGDSCRint */
4264
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4265
DBG_BCR_BVR_WCR_WVR(1),
4266
/* DBGDCCINT */
4267
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4268
/* DBGDSCRext */
4269
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4270
DBG_BCR_BVR_WCR_WVR(2),
4271
/* DBGDTR[RT]Xint */
4272
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4273
/* DBGDTR[RT]Xext */
4274
{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4275
DBG_BCR_BVR_WCR_WVR(3),
4276
DBG_BCR_BVR_WCR_WVR(4),
4277
DBG_BCR_BVR_WCR_WVR(5),
4278
/* DBGWFAR */
4279
{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4280
/* DBGOSECCR */
4281
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4282
DBG_BCR_BVR_WCR_WVR(6),
4283
/* DBGVCR */
4284
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4285
DBG_BCR_BVR_WCR_WVR(7),
4286
DBG_BCR_BVR_WCR_WVR(8),
4287
DBG_BCR_BVR_WCR_WVR(9),
4288
DBG_BCR_BVR_WCR_WVR(10),
4289
DBG_BCR_BVR_WCR_WVR(11),
4290
DBG_BCR_BVR_WCR_WVR(12),
4291
DBG_BCR_BVR_WCR_WVR(13),
4292
DBG_BCR_BVR_WCR_WVR(14),
4293
DBG_BCR_BVR_WCR_WVR(15),
4294
4295
/* DBGDRAR (32bit) */
4296
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4297
4298
DBGBXVR(0),
4299
/* DBGOSLAR */
4300
{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4301
DBGBXVR(1),
4302
/* DBGOSLSR */
4303
{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4304
DBGBXVR(2),
4305
DBGBXVR(3),
4306
/* DBGOSDLR */
4307
{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4308
DBGBXVR(4),
4309
/* DBGPRCR */
4310
{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4311
DBGBXVR(5),
4312
DBGBXVR(6),
4313
DBGBXVR(7),
4314
DBGBXVR(8),
4315
DBGBXVR(9),
4316
DBGBXVR(10),
4317
DBGBXVR(11),
4318
DBGBXVR(12),
4319
DBGBXVR(13),
4320
DBGBXVR(14),
4321
DBGBXVR(15),
4322
4323
/* DBGDSAR (32bit) */
4324
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4325
4326
/* DBGDEVID2 */
4327
{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4328
/* DBGDEVID1 */
4329
{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4330
/* DBGDEVID */
4331
{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4332
/* DBGCLAIMSET */
4333
{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4334
/* DBGCLAIMCLR */
4335
{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4336
/* DBGAUTHSTATUS */
4337
{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4338
};
4339
4340
/* Trapped cp14 64bit registers */
4341
static const struct sys_reg_desc cp14_64_regs[] = {
4342
/* DBGDRAR (64bit) */
4343
{ Op1( 0), CRm( 1), .access = trap_raz_wi },
4344
4345
/* DBGDSAR (64bit) */
4346
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
4347
};
4348
4349
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
4350
AA32(_map), \
4351
Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
4352
.visibility = pmu_visibility
4353
4354
/* Macro to expand the PMEVCNTRn register */
4355
#define PMU_PMEVCNTR(n) \
4356
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4357
(0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4358
.access = access_pmu_evcntr }
4359
4360
/* Macro to expand the PMEVTYPERn register */
4361
#define PMU_PMEVTYPER(n) \
4362
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4363
(0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4364
.access = access_pmu_evtyper }
4365
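/*
* As an example, PMU_PMEVCNTR(10) yields the AArch32 encoding
* (p15, 0, c14, c9, 2), i.e. PMEVCNTR10, since CRm = 0b1000 | (10 >> 3)
* = 9 and Op2 = 10 & 0x7 = 2; PMU_PMEVTYPER() uses the same scheme with
* a CRm base of 0b1100.
*/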
/*
4366
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4367
* depending on the way they are accessed (as a 32bit or a 64bit
4368
* register).
4369
*/
4370
static const struct sys_reg_desc cp15_regs[] = {
4371
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4372
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4373
/* ACTLR */
4374
{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4375
/* ACTLR2 */
4376
{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4377
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4378
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4379
/* TTBCR */
4380
{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4381
/* TTBCR2 */
4382
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4383
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4384
{ CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4385
/* DFSR */
4386
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4387
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4388
/* ADFSR */
4389
{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4390
/* AIFSR */
4391
{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4392
/* DFAR */
4393
{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4394
/* IFAR */
4395
{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4396
4397
/*
4398
* DC{C,I,CI}SW operations:
4399
*/
4400
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4401
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4402
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4403
4404
/* PMU */
4405
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4406
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4407
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4408
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4409
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4410
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4411
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
4412
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
4413
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4414
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4415
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4416
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4417
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4418
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4419
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4420
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
4421
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
4422
/* PMMIR */
4423
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4424
4425
/* PRRR/MAIR0 */
4426
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4427
/* NMRR/MAIR1 */
4428
{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4429
/* AMAIR0 */
4430
{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4431
/* AMAIR1 */
4432
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4433
4434
{ CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4435
{ CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4436
{ CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4437
{ CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4438
{ CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4439
{ CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4440
{ CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4441
{ CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4442
{ CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4443
{ CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4444
{ CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4445
{ CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4446
{ CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
4447
{ CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4448
{ CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4449
{ CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4450
{ CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4451
{ CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4452
{ CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4453
{ CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4454
{ CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4455
{ CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4456
4457
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4458
4459
/* Arch Timers */
4460
{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4461
{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4462
4463
/* PMEVCNTRn */
4464
PMU_PMEVCNTR(0),
4465
PMU_PMEVCNTR(1),
4466
PMU_PMEVCNTR(2),
4467
PMU_PMEVCNTR(3),
4468
PMU_PMEVCNTR(4),
4469
PMU_PMEVCNTR(5),
4470
PMU_PMEVCNTR(6),
4471
PMU_PMEVCNTR(7),
4472
PMU_PMEVCNTR(8),
4473
PMU_PMEVCNTR(9),
4474
PMU_PMEVCNTR(10),
4475
PMU_PMEVCNTR(11),
4476
PMU_PMEVCNTR(12),
4477
PMU_PMEVCNTR(13),
4478
PMU_PMEVCNTR(14),
4479
PMU_PMEVCNTR(15),
4480
PMU_PMEVCNTR(16),
4481
PMU_PMEVCNTR(17),
4482
PMU_PMEVCNTR(18),
4483
PMU_PMEVCNTR(19),
4484
PMU_PMEVCNTR(20),
4485
PMU_PMEVCNTR(21),
4486
PMU_PMEVCNTR(22),
4487
PMU_PMEVCNTR(23),
4488
PMU_PMEVCNTR(24),
4489
PMU_PMEVCNTR(25),
4490
PMU_PMEVCNTR(26),
4491
PMU_PMEVCNTR(27),
4492
PMU_PMEVCNTR(28),
4493
PMU_PMEVCNTR(29),
4494
PMU_PMEVCNTR(30),
4495
/* PMEVTYPERn */
4496
PMU_PMEVTYPER(0),
4497
PMU_PMEVTYPER(1),
4498
PMU_PMEVTYPER(2),
4499
PMU_PMEVTYPER(3),
4500
PMU_PMEVTYPER(4),
4501
PMU_PMEVTYPER(5),
4502
PMU_PMEVTYPER(6),
4503
PMU_PMEVTYPER(7),
4504
PMU_PMEVTYPER(8),
4505
PMU_PMEVTYPER(9),
4506
PMU_PMEVTYPER(10),
4507
PMU_PMEVTYPER(11),
4508
PMU_PMEVTYPER(12),
4509
PMU_PMEVTYPER(13),
4510
PMU_PMEVTYPER(14),
4511
PMU_PMEVTYPER(15),
4512
PMU_PMEVTYPER(16),
4513
PMU_PMEVTYPER(17),
4514
PMU_PMEVTYPER(18),
4515
PMU_PMEVTYPER(19),
4516
PMU_PMEVTYPER(20),
4517
PMU_PMEVTYPER(21),
4518
PMU_PMEVTYPER(22),
4519
PMU_PMEVTYPER(23),
4520
PMU_PMEVTYPER(24),
4521
PMU_PMEVTYPER(25),
4522
PMU_PMEVTYPER(26),
4523
PMU_PMEVTYPER(27),
4524
PMU_PMEVTYPER(28),
4525
PMU_PMEVTYPER(29),
4526
PMU_PMEVTYPER(30),
4527
/* PMCCFILTR */
4528
{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4529
4530
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4531
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4532
4533
/* CCSIDR2 */
4534
{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4535
4536
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4537
};
4538
4539
static const struct sys_reg_desc cp15_64_regs[] = {
4540
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4541
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4542
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4543
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
4544
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4545
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4546
{ SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
4547
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4548
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
4549
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
4550
{ SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
4551
};
4552
4553
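/*
* Validate a descriptor table at init time: when @reset_check is set,
* every entry that shadows a register must provide a reset handler, and
* all tables must be strictly ordered since lookups rely on binary
* search.
*/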
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4554
bool reset_check)
4555
{
4556
unsigned int i;
4557
4558
for (i = 0; i < n; i++) {
4559
if (reset_check && table[i].reg && !table[i].reset) {
4560
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4561
&table[i], i, table[i].name);
4562
return false;
4563
}
4564
4565
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4566
kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4567
&table[i], i, table[i - 1].name, table[i].name);
4568
return false;
4569
}
4570
}
4571
4572
return true;
4573
}
4574
4575
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4576
{
4577
kvm_inject_undefined(vcpu);
4578
return 1;
4579
}
4580
4581
static void perform_access(struct kvm_vcpu *vcpu,
4582
struct sys_reg_params *params,
4583
const struct sys_reg_desc *r)
4584
{
4585
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4586
4587
/* Check for regs disabled by runtime config */
4588
if (sysreg_hidden(vcpu, r)) {
4589
kvm_inject_undefined(vcpu);
4590
return;
4591
}
4592
4593
/*
* Not having an accessor means that we have configured a trap
* that we don't know how to handle. This certainly qualifies
* as a gross bug that should be fixed right away.
*/
4598
BUG_ON(!r->access);
4599
4600
/* Skip instruction if instructed so */
4601
if (likely(r->access(vcpu, params, r)))
4602
kvm_incr_pc(vcpu);
4603
}
4604
4605
/*
* emulate_cp -- tries to match a sys_reg access in a handling table and,
* on a match, calls the corresponding trap handler.
*
* @params: pointer to the descriptor of the access
* @table: array of trap descriptors
* @num: size of the trap descriptor array
*
* Return true if the access has been handled, false if not.
*/
4615
static bool emulate_cp(struct kvm_vcpu *vcpu,
4616
struct sys_reg_params *params,
4617
const struct sys_reg_desc *table,
4618
size_t num)
4619
{
4620
const struct sys_reg_desc *r;
4621
4622
if (!table)
4623
return false; /* Not handled */
4624
4625
r = find_reg(params, table, num);
4626
4627
if (r) {
4628
perform_access(vcpu, params, r);
4629
return true;
4630
}
4631
4632
/* Not handled */
4633
return false;
4634
}
4635
4636
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4637
struct sys_reg_params *params)
4638
{
4639
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4640
int cp = -1;
4641
4642
switch (esr_ec) {
4643
case ESR_ELx_EC_CP15_32:
4644
case ESR_ELx_EC_CP15_64:
4645
cp = 15;
4646
break;
4647
case ESR_ELx_EC_CP14_MR:
4648
case ESR_ELx_EC_CP14_64:
4649
cp = 14;
4650
break;
4651
default:
4652
WARN_ON(1);
4653
}
4654
4655
print_sys_reg_msg(params,
4656
"Unsupported guest CP%d access at: %08lx [%08lx]\n",
4657
cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4658
kvm_inject_undefined(vcpu);
4659
}
4660
4661
/**
* kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
* @global: the table of &struct sys_reg_desc trap descriptors
* @nr_global: size of the @global array
*/
4667
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4668
const struct sys_reg_desc *global,
4669
size_t nr_global)
4670
{
4671
struct sys_reg_params params;
4672
u64 esr = kvm_vcpu_get_esr(vcpu);
4673
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4674
int Rt2 = (esr >> 10) & 0x1f;
4675
4676
params.CRm = (esr >> 1) & 0xf;
4677
params.is_write = ((esr & 1) == 0);
4678
4679
params.Op0 = 0;
4680
params.Op1 = (esr >> 16) & 0xf;
4681
params.Op2 = 0;
4682
params.CRn = 0;
4683
4684
/*
* Make a 64-bit value out of Rt and Rt2. As we use the same trap
* backends between AArch32 and AArch64, we get away with it.
*/
4688
if (params.is_write) {
4689
params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4690
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4691
}
4692
4693
/*
* If the table contains a handler, handle the access and, in the
* case of a read, split the result back into Rt and Rt2 before
* returning with success.
*/
4698
if (emulate_cp(vcpu, &params, global, nr_global)) {
4699
/* Split up the value between registers for the read side */
4700
if (!params.is_write) {
4701
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4702
vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4703
}
4704
4705
return 1;
4706
}
4707
4708
unhandled_cp_access(vcpu, &params);
4709
return 1;
4710
}
4711
4712
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4713
4714
/*
* The CP10 ID registers are architecturally mapped to AArch64 feature
* registers. Abuse that fact so we can rely on the AArch64 handler for accesses
* from AArch32.
*/
4719
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4720
{
4721
u8 reg_id = (esr >> 10) & 0xf;
4722
bool valid;
4723
4724
params->is_write = ((esr & 1) == 0);
4725
params->Op0 = 3;
4726
params->Op1 = 0;
4727
params->CRn = 0;
4728
params->CRm = 3;
4729
4730
/* CP10 ID registers are read-only */
4731
valid = !params->is_write;
4732
4733
switch (reg_id) {
4734
/* MVFR0 */
4735
case 0b0111:
4736
params->Op2 = 0;
4737
break;
4738
/* MVFR1 */
4739
case 0b0110:
4740
params->Op2 = 1;
4741
break;
4742
/* MVFR2 */
4743
case 0b0101:
4744
params->Op2 = 2;
4745
break;
4746
default:
4747
valid = false;
4748
}
4749
4750
if (valid)
4751
return true;
4752
4753
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4754
str_write_read(params->is_write), reg_id);
4755
return false;
4756
}
4757
4758
/**
4759
* kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4760
* VFP Register' from AArch32.
4761
* @vcpu: The vCPU pointer
4762
*
4763
* MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4764
* Work out the correct AArch64 system register encoding and reroute to the
4765
* AArch64 system register emulation.
4766
*/
4767
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4768
{
4769
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4770
u64 esr = kvm_vcpu_get_esr(vcpu);
4771
struct sys_reg_params params;
4772
4773
/* UNDEF on any unhandled register access */
4774
if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4775
kvm_inject_undefined(vcpu);
4776
return 1;
4777
}
4778
4779
if (emulate_sys_reg(vcpu, &params))
4780
vcpu_set_reg(vcpu, Rt, params.regval);
4781
4782
return 1;
4783
}
4784
4785
/**
* kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
* CRn=0, which corresponds to the AArch32 feature
* registers.
* @vcpu: the vCPU pointer
* @params: the system register access parameters.
*
* Our cp15 system register tables do not enumerate the AArch32 feature
* registers. Conveniently, our AArch64 table does, and the AArch32 system
* register encoding can be trivially remapped into the AArch64 for the feature
* registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
*
* According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
* System registers with (coproc=0b1111, CRn==c0)", read accesses from this
* range are either UNKNOWN or RES0. Rerouting remains architectural as we
* treat undefined registers in this range as RAZ.
*/
4802
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4803
struct sys_reg_params *params)
4804
{
4805
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4806
4807
/* Treat impossible writes to RO registers as UNDEFINED */
4808
if (params->is_write) {
4809
unhandled_cp_access(vcpu, params);
4810
return 1;
4811
}
4812
4813
params->Op0 = 3;
4814
4815
/*
4816
* All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4817
* Avoid conflicting with future expansion of AArch64 feature registers
4818
* and simply treat them as RAZ here.
4819
*/
4820
if (params->CRm > 3)
4821
params->regval = 0;
4822
else if (!emulate_sys_reg(vcpu, params))
4823
return 1;
4824
4825
vcpu_set_reg(vcpu, Rt, params->regval);
4826
return 1;
4827
}
4828
4829
/**
* kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
* @params: the decoded &struct sys_reg_params for this access
* @global: the table of &struct sys_reg_desc trap descriptors
* @nr_global: size of the @global array
*/
4836
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
4837
struct sys_reg_params *params,
4838
const struct sys_reg_desc *global,
4839
size_t nr_global)
4840
{
4841
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4842
4843
params->regval = vcpu_get_reg(vcpu, Rt);
4844
4845
if (emulate_cp(vcpu, params, global, nr_global)) {
4846
if (!params->is_write)
4847
vcpu_set_reg(vcpu, Rt, params->regval);
4848
return 1;
4849
}
4850
4851
unhandled_cp_access(vcpu, params);
4852
return 1;
4853
}
4854
4855
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
4856
{
4857
return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
4858
}
4859
4860
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4861
{
4862
struct sys_reg_params params;
4863
4864
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4865
4866
/*
* Certain AArch32 ID registers are handled by rerouting to the AArch64
* system register table. Registers in the ID range where CRm=0 are
* excluded from this scheme as they do not trivially map into AArch64
* system register encodings, except for AIDR/REVIDR.
*/
4872
if (params.Op1 == 0 && params.CRn == 0 &&
4873
(params.CRm || params.Op2 == 6 /* REVIDR */))
4874
return kvm_emulate_cp15_id_reg(vcpu, &params);
4875
if (params.Op1 == 1 && params.CRn == 0 &&
4876
params.CRm == 0 && params.Op2 == 7 /* AIDR */)
4877
return kvm_emulate_cp15_id_reg(vcpu, &params);
4878
4879
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
4880
}
4881
4882
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
4883
{
4884
return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
4885
}
4886
4887
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
4888
{
4889
struct sys_reg_params params;
4890
4891
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4892
4893
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
4894
}
4895
4896
/**
4897
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
4898
* @vcpu: The VCPU pointer
4899
* @params: Decoded system register parameters
4900
*
4901
* Return: true if the system register access was successful, false otherwise.
4902
*/
4903
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
4904
struct sys_reg_params *params)
4905
{
4906
const struct sys_reg_desc *r;
4907
4908
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4909
if (likely(r)) {
4910
perform_access(vcpu, params, r);
4911
return true;
4912
}
4913
4914
print_sys_reg_msg(params,
4915
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
4916
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4917
kvm_inject_undefined(vcpu);
4918
4919
return false;
4920
}
4921
4922
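/*
* debugfs helper: walk sys_reg_descs and return the pos'th descriptor
* that is a VM-scoped feature ID register, or NULL once pos runs past
* the last one.
*/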
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
4923
{
4924
unsigned long i, idreg_idx = 0;
4925
4926
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4927
const struct sys_reg_desc *r = &sys_reg_descs[i];
4928
4929
if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
4930
continue;
4931
4932
if (idreg_idx == pos)
4933
return r;
4934
4935
idreg_idx++;
4936
}
4937
4938
return NULL;
4939
}
4940
4941
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
4942
{
4943
struct kvm *kvm = s->private;
4944
u8 *iter;
4945
4946
mutex_lock(&kvm->arch.config_lock);
4947
4948
iter = &kvm->arch.idreg_debugfs_iter;
4949
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
4950
*iter == (u8)~0) {
4951
*iter = *pos;
4952
if (!idregs_debug_find(kvm, *iter))
4953
iter = NULL;
4954
} else {
4955
iter = ERR_PTR(-EBUSY);
4956
}
4957
4958
mutex_unlock(&kvm->arch.config_lock);
4959
4960
return iter;
4961
}
4962
4963
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
4964
{
4965
struct kvm *kvm = s->private;
4966
4967
(*pos)++;
4968
4969
if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
4970
kvm->arch.idreg_debugfs_iter++;
4971
4972
return &kvm->arch.idreg_debugfs_iter;
4973
}
4974
4975
return NULL;
4976
}
4977
4978
static void idregs_debug_stop(struct seq_file *s, void *v)
4979
{
4980
struct kvm *kvm = s->private;
4981
4982
if (IS_ERR(v))
4983
return;
4984
4985
mutex_lock(&kvm->arch.config_lock);
4986
4987
kvm->arch.idreg_debugfs_iter = ~0;
4988
4989
mutex_unlock(&kvm->arch.config_lock);
4990
}
4991
4992
static int idregs_debug_show(struct seq_file *s, void *v)
4993
{
4994
const struct sys_reg_desc *desc;
4995
struct kvm *kvm = s->private;
4996
4997
desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
4998
4999
if (!desc->name)
5000
return 0;
5001
5002
seq_printf(s, "%20s:\t%016llx\n",
5003
desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
5004
5005
return 0;
5006
}
5007
5008
static const struct seq_operations idregs_debug_sops = {
5009
.start = idregs_debug_start,
5010
.next = idregs_debug_next,
5011
.stop = idregs_debug_stop,
5012
.show = idregs_debug_show,
5013
};
5014
5015
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
5016
5017
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
5018
{
5019
kvm->arch.idreg_debugfs_iter = ~0;
5020
5021
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
5022
&idregs_debug_fops);
5023
}
5024
5025
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
5026
{
5027
u32 id = reg_to_encoding(reg);
5028
struct kvm *kvm = vcpu->kvm;
5029
5030
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
5031
return;
5032
5033
kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
5034
}
5035
5036
static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
5037
const struct sys_reg_desc *reg)
5038
{
5039
if (kvm_vcpu_initialized(vcpu))
5040
return;
5041
5042
reg->reset(vcpu, reg);
5043
}
5044
5045
/**
* kvm_reset_sys_regs - sets system registers to their reset values
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on the
* virtual CPU struct to their architecturally defined reset values.
*/
5052
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
5053
{
5054
struct kvm *kvm = vcpu->kvm;
5055
unsigned long i;
5056
5057
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5058
const struct sys_reg_desc *r = &sys_reg_descs[i];
5059
5060
if (!r->reset)
5061
continue;
5062
5063
if (is_vm_ftr_id_reg(reg_to_encoding(r)))
5064
reset_vm_ftr_id_reg(vcpu, r);
5065
else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
5066
reset_vcpu_ftr_id_reg(vcpu, r);
5067
else
5068
r->reset(vcpu, r);
5069
5070
if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
5071
__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
5072
}
5073
5074
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
5075
5076
if (kvm_vcpu_has_pmu(vcpu))
5077
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
5078
}
5079
5080
/**
5081
* kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
5082
* trap on a guest execution
5083
* @vcpu: The VCPU pointer
5084
*/
5085
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
5086
{
5087
const struct sys_reg_desc *desc = NULL;
5088
struct sys_reg_params params;
5089
unsigned long esr = kvm_vcpu_get_esr(vcpu);
5090
int Rt = kvm_vcpu_sys_get_rt(vcpu);
5091
int sr_idx;
5092
5093
trace_kvm_handle_sys_reg(esr);
5094
5095
if (triage_sysreg_trap(vcpu, &sr_idx))
5096
return 1;
5097
5098
params = esr_sys64_to_params(esr);
5099
params.regval = vcpu_get_reg(vcpu, Rt);
5100
5101
/* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
5102
if (params.Op0 == 2 || params.Op0 == 3)
5103
desc = &sys_reg_descs[sr_idx];
5104
else
5105
desc = &sys_insn_descs[sr_idx];
5106
5107
perform_access(vcpu, &params, desc);
5108
5109
/* Read from system register? */
5110
if (!params.is_write &&
5111
(params.Op0 == 2 || params.Op0 == 3))
5112
vcpu_set_reg(vcpu, Rt, params.regval);
5113
5114
return 1;
5115
}
5116
5117
/******************************************************************************
5118
* Userspace API
5119
*****************************************************************************/
5120
5121
static bool index_to_params(u64 id, struct sys_reg_params *params)
5122
{
5123
switch (id & KVM_REG_SIZE_MASK) {
5124
case KVM_REG_SIZE_U64:
5125
/* Any unused index bits means it's not valid. */
5126
if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
5127
| KVM_REG_ARM_COPROC_MASK
5128
| KVM_REG_ARM64_SYSREG_OP0_MASK
5129
| KVM_REG_ARM64_SYSREG_OP1_MASK
5130
| KVM_REG_ARM64_SYSREG_CRN_MASK
5131
| KVM_REG_ARM64_SYSREG_CRM_MASK
5132
| KVM_REG_ARM64_SYSREG_OP2_MASK))
5133
return false;
5134
params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
5135
>> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
5136
params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
5137
>> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
5138
params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
5139
>> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
5140
params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
5141
>> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
5142
params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
5143
>> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
5144
return true;
5145
default:
5146
return false;
5147
}
5148
}
5149
5150
const struct sys_reg_desc *get_reg_by_id(u64 id,
5151
const struct sys_reg_desc table[],
5152
unsigned int num)
5153
{
5154
struct sys_reg_params params;
5155
5156
if (!index_to_params(id, &params))
5157
return NULL;
5158
5159
return find_reg(&params, table, num);
5160
}
5161
5162
/* Decode an index value, and find the sys_reg_desc entry. */
5163
static const struct sys_reg_desc *
5164
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
5165
const struct sys_reg_desc table[], unsigned int num)
5166
5167
{
5168
const struct sys_reg_desc *r;
5169
5170
/* We only do sys_reg for now. */
5171
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
5172
return NULL;
5173
5174
r = get_reg_by_id(id, table, num);
5175
5176
/* Not saved in the sys_reg array and not otherwise accessible? */
5177
if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
5178
r = NULL;
5179
5180
return r;
5181
}
5182
5183
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5184
{
5185
u32 val;
5186
u32 __user *uval = uaddr;
5187
5188
/* Fail if we have unknown bits set. */
5189
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5190
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5191
return -ENOENT;
5192
5193
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5194
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5195
if (KVM_REG_SIZE(id) != 4)
5196
return -ENOENT;
5197
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5198
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5199
if (val >= CSSELR_MAX)
5200
return -ENOENT;
5201
5202
return put_user(get_ccsidr(vcpu, val), uval);
5203
default:
5204
return -ENOENT;
5205
}
5206
}
5207
5208
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5209
{
5210
u32 val, newval;
5211
u32 __user *uval = uaddr;
5212
5213
/* Fail if we have unknown bits set. */
5214
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5215
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5216
return -ENOENT;
5217
5218
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5219
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5220
if (KVM_REG_SIZE(id) != 4)
5221
return -ENOENT;
5222
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5223
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5224
if (val >= CSSELR_MAX)
5225
return -ENOENT;
5226
5227
if (get_user(newval, uval))
5228
return -EFAULT;
5229
5230
return set_ccsidr(vcpu, val, newval);
5231
default:
5232
return -ENOENT;
5233
}
5234
}
5235
5236
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5237
const struct sys_reg_desc table[], unsigned int num)
5238
{
5239
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5240
const struct sys_reg_desc *r;
5241
u64 val;
5242
int ret;
5243
5244
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
5245
if (!r || sysreg_hidden(vcpu, r))
5246
return -ENOENT;
5247
5248
if (r->get_user) {
5249
ret = (r->get_user)(vcpu, r, &val);
5250
} else {
5251
val = __vcpu_sys_reg(vcpu, r->reg);
5252
ret = 0;
5253
}
5254
5255
if (!ret)
5256
ret = put_user(val, uaddr);
5257
5258
return ret;
5259
}
5260
5261
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5262
{
5263
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5264
5265
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5266
return demux_c15_get(vcpu, reg->id, uaddr);
5267
5268
return kvm_sys_reg_get_user(vcpu, reg,
5269
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5270
}
5271
5272
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5273
const struct sys_reg_desc table[], unsigned int num)
5274
{
5275
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5276
const struct sys_reg_desc *r;
5277
u64 val;
5278
int ret;
5279
5280
if (get_user(val, uaddr))
5281
return -EFAULT;
5282
5283
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
5284
if (!r || sysreg_hidden(vcpu, r))
5285
return -ENOENT;
5286
5287
if (sysreg_user_write_ignore(vcpu, r))
5288
return 0;
5289
5290
if (r->set_user) {
5291
ret = (r->set_user)(vcpu, r, val);
5292
} else {
5293
__vcpu_assign_sys_reg(vcpu, r->reg, val);
5294
ret = 0;
5295
}
5296
5297
return ret;
5298
}
5299
5300
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5301
{
5302
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5303
5304
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5305
return demux_c15_set(vcpu, reg->id, uaddr);
5306
5307
return kvm_sys_reg_set_user(vcpu, reg,
5308
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5309
}
5310
5311
static unsigned int num_demux_regs(void)
5312
{
5313
return CSSELR_MAX;
5314
}
5315
5316
static int write_demux_regids(u64 __user *uindices)
5317
{
5318
u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
5319
unsigned int i;
5320
5321
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
5322
for (i = 0; i < CSSELR_MAX; i++) {
5323
if (put_user(val | i, uindices))
5324
return -EFAULT;
5325
uindices++;
5326
}
5327
return 0;
5328
}
5329
5330
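/*
* Build the KVM_{GET,SET}_ONE_REG index for a descriptor: a 64-bit
* sysreg index with Op0/Op1/CRn/CRm/Op2 packed into their respective
* KVM_REG_ARM64_SYSREG_* fields.
*/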
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
5331
{
5332
return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
5333
KVM_REG_ARM64_SYSREG |
5334
(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
5335
(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
5336
(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
5337
(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
5338
(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
5339
}
5340
5341
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
5342
{
5343
if (!*uind)
5344
return true;
5345
5346
if (put_user(sys_reg_to_index(reg), *uind))
5347
return false;
5348
5349
(*uind)++;
5350
return true;
5351
}
5352
5353
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
5354
const struct sys_reg_desc *rd,
5355
u64 __user **uind,
5356
unsigned int *total)
5357
{
5358
/*
5359
* Ignore registers we trap but don't save,
5360
* and for which no custom user accessor is provided.
5361
*/
5362
if (!(rd->reg || rd->get_user))
5363
return 0;
5364
5365
if (sysreg_hidden(vcpu, rd))
5366
return 0;
5367
5368
if (!copy_reg_to_user(rd, uind))
5369
return -EFAULT;
5370
5371
(*total)++;
5372
return 0;
5373
}
5374
5375
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
5376
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
5377
{
5378
const struct sys_reg_desc *i2, *end2;
5379
unsigned int total = 0;
5380
int err;
5381
5382
i2 = sys_reg_descs;
5383
end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
5384
5385
while (i2 != end2) {
5386
err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
5387
if (err)
5388
return err;
5389
}
5390
return total;
5391
}
5392
5393
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
5394
{
5395
return num_demux_regs()
5396
+ walk_sys_regs(vcpu, (u64 __user *)NULL);
5397
}
5398
5399
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
5400
{
5401
int err;
5402
5403
err = walk_sys_regs(vcpu, uindices);
5404
if (err < 0)
5405
return err;
5406
uindices += err;
5407
5408
return write_demux_regids(uindices);
5409
}
5410
5411
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
5412
KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \
5413
sys_reg_Op1(r), \
5414
sys_reg_CRn(r), \
5415
sys_reg_CRm(r), \
5416
sys_reg_Op2(r))
5417
5418
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
5419
{
5420
const void *zero_page = page_to_virt(ZERO_PAGE(0));
5421
u64 __user *masks = (u64 __user *)range->addr;
5422
5423
/* Only feature id range is supported, reserved[13] must be zero. */
5424
if (range->range ||
5425
memcmp(range->reserved, zero_page, sizeof(range->reserved)))
5426
return -EINVAL;
5427
5428
/* Wipe the whole thing first */
5429
if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
5430
return -EFAULT;
5431
5432
for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5433
const struct sys_reg_desc *reg = &sys_reg_descs[i];
5434
u32 encoding = reg_to_encoding(reg);
5435
u64 val;
5436
5437
if (!is_feature_id_reg(encoding) || !reg->set_user)
5438
continue;
5439
5440
if (!reg->val ||
5441
(is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
5442
continue;
5443
}
5444
val = reg->val;
5445
5446
if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
5447
return -EFAULT;
5448
}
5449
5450
return 0;
5451
}
5452
5453
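/*
* Compute the static part of this vCPU's HCR_EL2 configuration from the
* host capabilities and the VM's ID register view: E2H for (h)VHE, RAS
* trapping, FWB, cache-ID trapping (TID2/TID4), AArch32 EL1, MTE and
* TLBI range trapping.
*/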
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
5454
{
5455
struct kvm *kvm = vcpu->kvm;
5456
5457
if (has_vhe() || has_hvhe())
5458
vcpu->arch.hcr_el2 |= HCR_E2H;
5459
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
5460
/* route synchronous external abort exceptions to EL2 */
5461
vcpu->arch.hcr_el2 |= HCR_TEA;
5462
/* trap error record accesses */
5463
vcpu->arch.hcr_el2 |= HCR_TERR;
5464
}
5465
5466
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
5467
vcpu->arch.hcr_el2 |= HCR_FWB;
5468
5469
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
5470
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
5471
kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
5472
vcpu->arch.hcr_el2 |= HCR_TID4;
5473
else
5474
vcpu->arch.hcr_el2 |= HCR_TID2;
5475
5476
if (vcpu_el1_is_32bit(vcpu))
5477
vcpu->arch.hcr_el2 &= ~HCR_RW;
5478
5479
if (kvm_has_mte(vcpu->kvm))
5480
vcpu->arch.hcr_el2 |= HCR_ATA;
5481
5482
/*
* In the absence of FGT, we cannot independently trap TLBI
* Range instructions. This isn't great, but trapping all
* TLBIs would be far worse. Live with it...
*/
5487
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
5488
vcpu->arch.hcr_el2 |= HCR_TTLBOS;
5489
}
5490
5491
void kvm_calculate_traps(struct kvm_vcpu *vcpu)
5492
{
5493
struct kvm *kvm = vcpu->kvm;
5494
5495
mutex_lock(&kvm->arch.config_lock);
5496
vcpu_set_hcr(vcpu);
5497
vcpu_set_ich_hcr(vcpu);
5498
vcpu_set_hcrx(vcpu);
5499
5500
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
5501
goto out;
5502
5503
compute_fgu(kvm, HFGRTR_GROUP);
5504
compute_fgu(kvm, HFGITR_GROUP);
5505
compute_fgu(kvm, HDFGRTR_GROUP);
5506
compute_fgu(kvm, HAFGRTR_GROUP);
5507
compute_fgu(kvm, HFGRTR2_GROUP);
5508
compute_fgu(kvm, HFGITR2_GROUP);
5509
compute_fgu(kvm, HDFGRTR2_GROUP);
5510
5511
set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
5512
out:
5513
mutex_unlock(&kvm->arch.config_lock);
5514
}
5515
5516
/*
* Perform last adjustments to the ID registers that are implied by the
* configuration outside of the ID regs themselves, as well as any
* initialisation that directly depends on these ID registers (such as
* RES0/RES1 behaviours). This is not the place to configure traps though.
*
* Because this can be called once per CPU, changes must be idempotent.
*/
5524
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
5525
{
5526
struct kvm *kvm = vcpu->kvm;
5527
5528
guard(mutex)(&kvm->arch.config_lock);
5529
5530
if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
5531
irqchip_in_kernel(kvm) &&
5532
kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
5533
kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
5534
kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
5535
}
5536
5537
if (vcpu_has_nv(vcpu)) {
5538
int ret = kvm_init_nv_sysregs(vcpu);
5539
if (ret)
5540
return ret;
5541
}
5542
5543
return 0;
5544
}
5545
5546
int __init kvm_sys_reg_table_init(void)
5547
{
5548
const struct sys_reg_desc *gicv3_regs;
5549
bool valid = true;
5550
unsigned int i, sz;
5551
int ret = 0;
5552
5553
/* Make sure tables are unique and in order. */
5554
valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
5555
valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
5556
valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
5557
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
5558
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
5559
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
5560
5561
gicv3_regs = vgic_v3_get_sysreg_table(&sz);
5562
valid &= check_sysreg_table(gicv3_regs, sz, false);
5563
5564
if (!valid)
5565
return -EINVAL;
5566
5567
init_imp_id_regs();
5568
5569
ret = populate_nv_trap_config();
5570
5571
check_feature_map();
5572
5573
for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
5574
ret = populate_sysreg_config(sys_reg_descs + i, i);
5575
5576
for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
5577
ret = populate_sysreg_config(sys_insn_descs + i, i);
5578
5579
return ret;
5580
}
5581
5582