// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity <[email protected]>
 *   Gleb Natapov <[email protected]>
 *   Wei Huang <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* Unadulterated PMU capabilities of the host, i.e. of hardware. */
static struct x86_pmu_capability __read_mostly kvm_host_pmu;

/* KVM's PMU capabilities, i.e. the intersection of KVM and hardware support. */
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_cap);

struct kvm_pmu_emulated_event_selectors {
        u64 INSTRUCTIONS_RETIRED;
        u64 BRANCH_INSTRUCTIONS_RETIRED;
};
static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
        X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
        X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
        /* Instruction-Accurate PDIR (PDIR++) */
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
        {}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
        X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
        {}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively.  Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn and, for families 15H and later,
 *      MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *      aliased to MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction.  For instance, the AMD RDPMC instruction uses
 *      0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3).  Intel
 *      has a similar mechanism, except that it also supports fixed
 *      counters.  idx can be used as an index into the gp and fixed
 *      counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code.  Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed).  The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
 *               [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *               and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
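
/*
 * Illustrative example of the mapping above (the counter counts here are
 * hypothetical): on an Intel vCPU with 8 gp counters and 3 fixed counters,
 * pmc idx 0..7 refer to the gp counters and idx KVM_FIXED_PMC_BASE_IDX ..
 * KVM_FIXED_PMC_BASE_IDX + 2 refer to fixed counters 0..2, so a single
 * bitmap (e.g. all_valid_pmc_idx or reprogram_pmi below) can address every
 * counter with one bit per pmc.
 */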

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func) \
        DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func, \
                                *(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
        memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
        static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
        WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

        perf_get_x86_pmu_capability(&kvm_host_pmu);

        /*
         * Hybrid PMUs don't play nice with virtualization without careful
         * configuration by userspace, and KVM's APIs for reporting supported
         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
         */
        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
                enable_pmu = false;

        if (enable_pmu) {
                /*
                 * WARN if perf did NOT disable the hardware PMU even though
                 * the architecturally required GP counters aren't present,
                 * i.e. if there is a non-zero number of counters, but fewer
                 * than what is architecturally required.
                 */
                if (!kvm_host_pmu.num_counters_gp ||
                    WARN_ON_ONCE(kvm_host_pmu.num_counters_gp < min_nr_gp_ctrs))
                        enable_pmu = false;
                else if (is_intel && !kvm_host_pmu.version)
                        enable_pmu = false;
        }

        if (!enable_pmu) {
                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
                return;
        }

        memcpy(&kvm_pmu_cap, &kvm_host_pmu, sizeof(kvm_host_pmu));
        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
                                          pmu_ops->MAX_NR_GP_COUNTERS);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_MAX_NR_FIXED_COUNTERS);

        kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
        kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        bool skip_pmi = false;

        if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
                if (!in_pmi) {
                        /*
                         * TODO: KVM is currently _choosing_ to not generate
                         * records for emulated instructions, avoiding a
                         * BUFFER_OVF PMI when there are no records.  Strictly
                         * speaking, records should be generated for emulated
                         * instructions as well, in the right context, to
                         * improve sampling accuracy.
                         */
                        skip_pmi = true;
                } else {
                        /* Indicate PEBS overflow PMI to guest. */
                        skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
                                                      (unsigned long *)&pmu->global_status);
                }
        } else {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
        }

        if (pmc->intr && !skip_pmi)
                kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;

        /*
         * Ignore asynchronous overflow events for counters that are scheduled
         * to be reprogrammed, e.g. if a PMI for the previous event races with
         * KVM's handling of a related guest WRMSR.
         */
        if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
                return;

        __kvm_perf_overflow(pmc, true);

        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{
        /*
         * For some model-specific PEBS counters with special capabilities
         * (PDIR, PDIR++, PDIST), KVM needs to raise the event's precise
         * level to the maximum value (currently 3, backwards compatible)
         * so that the perf subsystem assigns a hardware counter with that
         * capability to the vPMC.
         */
        if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
            (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
                return 3;

        /*
         * A non-zero precision level for the guest event turns an ordinary
         * guest event into a guest PEBS event and triggers the host PEBS
         * PMI handler to determine whether the PEBS overflow PMI comes from
         * the host counters or the guest.
         */
        return 1;
}

static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
        u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

        if (!sample_period)
                sample_period = pmc_bitmask(pmc) + 1;
        return sample_period;
}
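
/*
 * Worked example (illustrative): for a 48-bit counter, pmc_bitmask() is
 * 0xffffffffffff.  If the guest programs the counter to 0xffffffffff00,
 * the sample period becomes (-0xffffffffff00) & 0xffffffffffff = 0x100,
 * i.e. perf delivers an overflow after 256 more increments, exactly when
 * the emulated counter would wrap.
 */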

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
                                 bool exclude_user, bool exclude_kernel,
                                 bool intr)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };
        bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

        attr.sample_period = get_sample_period(pmc, pmc->counter);

        if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
            (boot_cpu_has(X86_FEATURE_RTM) || boot_cpu_has(X86_FEATURE_HLE))) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period.  Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
        }
        if (pebs) {
                /*
                 * For most PEBS hardware events, the difference in the software
                 * precision levels of guest and host PEBS events will not affect
                 * the accuracy of the PEBS profiling result, because the "event IP"
                 * in the PEBS record is calibrated on the guest side.
                 */
                attr.precise_ip = pmc_get_pebs_precise_level(pmc);
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
                                     PTR_ERR(event), pmc->idx);
                return PTR_ERR(event);
        }

        pmc->perf_event = event;
        pmc_to_pmu(pmc)->event_count++;
        pmc->is_paused = false;
        pmc->intr = intr || pebs;
        return 0;
}

static bool pmc_pause_counter(struct kvm_pmc *pmc)
{
        u64 counter = pmc->counter;
        u64 prev_counter;

        /* update counter, reset event value to avoid redundant accumulation */
        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_pause(pmc->perf_event, true);

        /*
         * Snapshot the previous counter *after* accumulating state from perf.
         * If overflow already happened, hardware (via perf) is responsible for
         * generating a PMI.  KVM just needs to detect overflow on emulated
         * counter events that haven't yet been processed.
         */
        prev_counter = counter & pmc_bitmask(pmc);

        counter += pmc->emulated_counter;
        pmc->counter = counter & pmc_bitmask(pmc);

        pmc->emulated_counter = 0;
        pmc->is_paused = true;

        return pmc->counter < prev_counter;
}
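
/*
 * For example (illustrative): with a 48-bit counter, if perf's accumulated
 * value is 0xfffffffffffe and two instructions were emulated
 * (emulated_counter == 2), prev_counter is 0xfffffffffffe while the final
 * pmc->counter wraps to 0x0, so pmc_pause_counter() returns true and the
 * caller emulates the overflow via __kvm_perf_overflow().
 */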

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event)
                return false;

        /* recalibrate sample period and check if it's accepted by perf core */
        if (is_sampling_event(pmc->perf_event) &&
            perf_event_period(pmc->perf_event,
                              get_sample_period(pmc, pmc->counter)))
                return false;

        if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
            (!!pmc->perf_event->attr.precise_ip))
                return false;

        /* reuse perf_event to serve as pmc_reprogram_counter() does */
        perf_event_enable(pmc->perf_event);
        pmc->is_paused = false;

        return true;
}

static void pmc_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
                pmc->current_config = 0;
                pmc_to_pmu(pmc)->event_count--;
        }
}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = pmc_read_counter(pmc);
                pmc_release_perf_event(pmc);
        }
}

static void pmc_update_sample_period(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event || pmc->is_paused ||
            !is_sampling_event(pmc->perf_event))
                return;

        perf_event_period(pmc->perf_event,
                          get_sample_period(pmc, pmc->counter));
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
        /*
         * Drop any unconsumed accumulated counts; the WRMSR is a write, not a
         * read-modify-write.  Adjust the counter value so that its value is
         * relative to the current count, as reading the current count from
         * perf is faster than pausing and reprogramming the event in order to
         * reset it to '0'.  Note, this very sneakily offsets the accumulated
         * emulated count too, by using pmc_read_counter()!
         */
        pmc->emulated_counter = 0;
        pmc->counter += val - pmc_read_counter(pmc);
        pmc->counter &= pmc_bitmask(pmc);
        pmc_update_sample_period(pmc);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(pmc_write_counter);

static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
        u64 a = *(u64 *)pa & mask;
        u64 b = *(u64 *)pb & mask;

        return (a > b) - (a < b);
}

static int filter_sort_cmp(const void *pa, const void *pb)
{
        return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
                                   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}

/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both).  As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
        return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}

static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
        u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
                          filter_event_cmp);

        if (!fe)
                return -1;

        return fe - events;
}

static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
        u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
        u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;

        BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
                      (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
                     ARCH_PERFMON_EVENTSEL_UMASK);

        return (umask & mask) == match;
}
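
/*
 * For example (illustrative): a masked filter entry with umask_mask 0xff
 * and umask_match 0x01 matches only a guest unit mask of 0x01, whereas an
 * entry with umask_mask 0xf0 and umask_match 0x00 matches any unit mask
 * whose upper nibble is zero, since the match is (umask & mask) == match.
 */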

static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
        u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
        u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
        int i, index;

        index = find_filter_index(events, nevents, event_select);
        if (index < 0)
                return false;

        /*
         * Entries are sorted by the event select.  Walk the list in both
         * directions to process all entries with the targeted event select.
         */
        for (i = index; i < nevents; i++) {
                if (filter_event_cmp(&events[i], &event_select))
                        break;

                if (is_filter_entry_match(events[i], umask))
                        return true;
        }

        for (i = index - 1; i >= 0; i--) {
                if (filter_event_cmp(&events[i], &event_select))
                        break;

                if (is_filter_entry_match(events[i], umask))
                        return true;
        }

        return false;
}

static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
                                u64 eventsel)
{
        if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
            !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
                return f->action == KVM_PMU_EVENT_ALLOW;

        return f->action == KVM_PMU_EVENT_DENY;
}

static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
                                   int idx)
{
        int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX;

        if (filter->action == KVM_PMU_EVENT_DENY &&
            test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
                return false;
        if (filter->action == KVM_PMU_EVENT_ALLOW &&
            !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
                return false;

        return true;
}

static bool pmc_is_event_allowed(struct kvm_pmc *pmc)
{
        struct kvm_x86_pmu_event_filter *filter;
        struct kvm *kvm = pmc->vcpu->kvm;

        filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
        if (!filter)
                return true;

        if (pmc_is_gp(pmc))
                return is_gp_event_allowed(filter, pmc->eventsel);

        return is_fixed_event_allowed(filter, pmc->idx);
}

static int reprogram_counter(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
        u64 new_config = eventsel;
        bool emulate_overflow;
        u8 fixed_ctr_ctrl;

        emulate_overflow = pmc_pause_counter(pmc);

        if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||
            !pmc_is_event_allowed(pmc))
                return 0;

        if (emulate_overflow)
                __kvm_perf_overflow(pmc, false);

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        if (pmc_is_fixed(pmc)) {
                fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                                  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
                        eventsel |= ARCH_PERFMON_EVENTSEL_OS;
                if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
                        eventsel |= ARCH_PERFMON_EVENTSEL_USR;
                if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
                        eventsel |= ARCH_PERFMON_EVENTSEL_INT;
                new_config = (u64)fixed_ctr_ctrl;
        }

        if (pmc->current_config == new_config && pmc_resume_counter(pmc))
                return 0;

        pmc_release_perf_event(pmc);

        pmc->current_config = new_config;

        return pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
                                     (eventsel & pmu->raw_event_mask),
                                     !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                                     !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                                     eventsel & ARCH_PERFMON_EVENTSEL_INT);
}

static bool pmc_is_event_match(struct kvm_pmc *pmc, u64 eventsel)
{
        /*
         * Ignore checks for edge detect (all events currently emulated by KVM
         * are always rising edges), pin control (unsupported by modern CPUs),
         * and counter mask and its invert flag (KVM doesn't emulate multiple
         * events in a single clock cycle).
         *
         * Note, the uppermost nibble of AMD's mask overlaps Intel's IN_TX (bit
         * 32) and IN_TXCP (bit 33), as well as two reserved bits (bits 35:34).
         * Checking the "in HLE/RTM transaction" flags is correct as the vCPU
         * can't be in a transaction if KVM is emulating an instruction.
         *
         * Checking the reserved bits might be wrong if they are defined in the
         * future, but so could ignoring them, so do the simple thing for now.
         */
        return !((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB);
}

void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
{
        bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1);
        bitmap_clear(pmu->pmc_counting_branches, pmc->idx, 1);

        /*
         * Do NOT consult the PMU event filters, as the filters must be checked
         * at the time of emulation to ensure KVM uses fresh information, e.g.
         * omitting a PMC from a bitmap could result in a missed event if the
         * filter is changed to allow counting the event.
         */
        if (!pmc_is_locally_enabled(pmc))
                return;

        if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED))
                bitmap_set(pmu->pmc_counting_instructions, pmc->idx, 1);

        if (pmc_is_event_match(pmc, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED))
                bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_recalc_pmc_emulation);

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int bit;

        bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);

        /*
         * The reprogramming bitmap can be written asynchronously by something
         * other than the task that holds vcpu->mutex; take care to clear only
         * the bits that will actually be processed.
         */
        BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
        atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);

        kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
                /*
                 * If reprogramming fails, e.g. due to contention, leave the
                 * reprogram bit set, i.e. opportunistically try again on the
                 * next PMU refresh.  Don't make a new request as doing so can
                 * stall the guest if reprogramming repeatedly fails.
                 */
                if (reprogram_counter(pmc))
                        set_bit(pmc->idx, pmu->reprogram_pmi);
        }

        /*
         * Release unused perf_events if the corresponding guest MSRs weren't
         * accessed during the last vCPU time slice (need_cleanup is set when
         * the vCPU is scheduled back in).
         */
        if (unlikely(pmu->need_cleanup))
                kvm_pmu_cleanup(vcpu);

        kvm_for_each_pmc(pmu, pmc, bit, bitmap)
                kvm_pmu_recalc_pmc_emulation(pmu, pmc);
}

int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
        /*
         * On Intel, VMX interception has priority over RDPMC exceptions that
         * aren't already handled by the emulator, i.e. no additional checks
         * are needed for Intel PMUs.
         *
         * On AMD, _all_ exceptions on RDPMC have priority over SVM intercepts,
         * i.e. an invalid PMC results in a #GP, not #VMEXIT.
         */
        if (!kvm_pmu_ops.check_rdpmc_early)
                return 0;

        return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
        switch (pmc_idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                return true;
        }
        return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        u64 ctr_val;

        switch (idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
                ctr_val = rdtsc();
                break;
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
                ctr_val = ktime_get_boottime_ns();
                break;
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                ctr_val = ktime_get_boottime_ns() +
                          vcpu->kvm->arch.kvmclock_offset;
                break;
        default:
                return 1;
        }

        *data = ctr_val;
        return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 mask = ~0ull;

        if (!pmu->version)
                return 1;

        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

        pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
        if (!pmc)
                return 1;

        if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
            (kvm_x86_call(get_cpl)(vcpu) != 0) &&
            kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                return 1;

        *data = pmc_read_counter(pmc) & mask;
        return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu)) {
                kvm_pmu_call(deliver_pmi)(vcpu);
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
        }
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
        default:
                break;
        }
        return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
               kvm_pmu_call(is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);

        if (pmc)
                __set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u32 msr = msr_info->index;

        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
                msr_info->data = pmu->global_status;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
        case MSR_CORE_PERF_GLOBAL_CTRL:
                msr_info->data = pmu->global_ctrl;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                msr_info->data = 0;
                break;
        default:
                return kvm_pmu_call(get_msr)(vcpu, msr_info);
        }

        return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
        u64 diff;

        /*
         * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
         * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
         */
        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (!msr_info->host_initiated)
                        return 1; /* RO MSR */
                fallthrough;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
                /* Per PPR, read-only MSR.  Writes are ignored. */
                if (!msr_info->host_initiated)
                        break;

                if (data & pmu->global_status_rsvd)
                        return 1;

                pmu->global_status = data;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
                data &= ~pmu->global_ctrl_rsvd;
                fallthrough;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (!kvm_valid_perf_global_ctrl(pmu, data))
                        return 1;

                if (pmu->global_ctrl != data) {
                        diff = pmu->global_ctrl ^ data;
                        pmu->global_ctrl = data;
                        reprogram_counters(pmu, diff);
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                /*
                 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
                 * GLOBAL_STATUS, and so the set of reserved bits is the same.
                 */
                if (data & pmu->global_status_rsvd)
                        return 1;
                fallthrough;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
                if (!msr_info->host_initiated)
                        pmu->global_status &= ~data;
                break;
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
                if (!msr_info->host_initiated)
                        pmu->global_status |= data & ~pmu->global_status_rsvd;
                break;
        default:
                kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
                return kvm_pmu_call(set_msr)(vcpu, msr_info);
        }

        return 0;
}

static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;

        pmu->need_cleanup = false;

        bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

        kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
                pmc_stop_counter(pmc);
                pmc->counter = 0;
                pmc->emulated_counter = 0;

                if (pmc_is_gp(pmc))
                        pmc->eventsel = 0;
        }

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

        kvm_pmu_call(reset)(vcpu);
}

/*
 * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
 * and/or PERF_CAPABILITIES.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
                return;

        /*
         * Stop/release all existing counters/events before realizing the new
         * vPMU model.
         */
        kvm_pmu_reset(vcpu);

        pmu->version = 0;
        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
        pmu->global_ctrl_rsvd = ~0ull;
        pmu->global_status_rsvd = ~0ull;
        pmu->fixed_ctr_ctrl_rsvd = ~0ull;
        pmu->pebs_enable_rsvd = ~0ull;
        pmu->pebs_data_cfg_rsvd = ~0ull;
        bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);

        if (!vcpu->kvm->arch.enable_pmu)
                return;

        kvm_pmu_call(refresh)(vcpu);

        /*
         * At RESET, both Intel and AMD CPUs set all enable bits for general
         * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
         * was written for v1 PMUs doesn't unknowingly leave GP counters
         * disabled in the global controls).  Emulate that behavior when
         * refreshing the PMU so that userspace doesn't need to manually set
         * PERF_GLOBAL_CTRL.
         */
        if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
                pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);

        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
                   pmu->nr_arch_fixed_counters);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        kvm_pmu_call(init)(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
        int i;

        pmu->need_cleanup = false;

        bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);

        kvm_for_each_pmc(pmu, pmc, i, bitmask) {
                if (pmc->perf_event && !pmc_is_locally_enabled(pmc))
                        pmc_stop_counter(pmc);
        }

        kvm_pmu_call(cleanup)(vcpu);

        bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
        pmc->emulated_counter++;
        kvm_pmu_request_counter_reprogram(pmc);
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
        bool select_os, select_user;
        u64 config;

        if (pmc_is_gp(pmc)) {
                config = pmc->eventsel;
                select_os = config & ARCH_PERFMON_EVENTSEL_OS;
                select_user = config & ARCH_PERFMON_EVENTSEL_USR;
        } else {
                config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
                                          pmc->idx - KVM_FIXED_PMC_BASE_IDX);
                select_os = config & INTEL_FIXED_0_KERNEL;
                select_user = config & INTEL_FIXED_0_USER;
        }

        /*
         * Skip the CPL lookup, which isn't free on Intel, if the result will
         * be the same regardless of the CPL.
         */
        if (select_os == select_user)
                return select_os;

        return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os :
                                                         select_user;
}

static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu,
                                  const unsigned long *event_pmcs)
{
        DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i, idx;

        BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);

        if (bitmap_empty(event_pmcs, X86_PMC_IDX_MAX))
                return;

        if (!kvm_pmu_has_perf_global_ctrl(pmu))
                bitmap_copy(bitmap, event_pmcs, X86_PMC_IDX_MAX);
        else if (!bitmap_and(bitmap, event_pmcs,
                             (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
                return;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_for_each_pmc(pmu, pmc, i, bitmap) {
                if (!pmc_is_event_allowed(pmc) || !cpl_is_matched(pmc))
                        continue;

                kvm_pmu_incr_counter(pmc);
        }
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
{
        kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_instructions);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_instruction_retired);

void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
{
        kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_branches);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired);

static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
        u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
                   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
                   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
                   KVM_PMU_MASKED_ENTRY_EXCLUDE;
        int i;

        for (i = 0; i < filter->nevents; i++) {
                if (filter->events[i] & ~mask)
                        return false;
        }

        return true;
}

static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{
        int i, j;

        for (i = 0, j = 0; i < filter->nevents; i++) {
                /*
                 * Skip events that are impossible to match against a guest
                 * event.  When filtering, only the event select + unit mask
                 * of the guest event is used.  To maintain backwards
                 * compatibility, impossible filters can't be rejected :-(
                 */
                if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
                                          ARCH_PERFMON_EVENTSEL_UMASK))
                        continue;
                /*
                 * Convert userspace events to a common in-kernel event so
                 * only one code path is needed to support both events.  For
                 * the in-kernel events use masked events because they are
                 * flexible enough to handle both cases.  To convert to masked
                 * events all that's needed is to add an "all ones" umask_mask
                 * (unmasked filter events don't support EXCLUDE).
                 */
                filter->events[j++] = filter->events[i] |
                                      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
        }

        filter->nevents = j;
}
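
/*
 * For example (illustrative): a legacy filter entry for event select 0xc0
 * with unit mask 0x00 is converted to a masked entry whose umask_match is
 * 0x00 and whose umask_mask is 0xff, i.e. it matches only that exact unit
 * mask, which preserves the legacy filter's exact-match semantics.
 */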

static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
        int i;

        if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
                convert_to_masked_filter(filter);
        else if (!is_masked_filter_valid(filter))
                return -EINVAL;

        /*
         * Sort entries by event select and includes vs. excludes so that all
         * entries for a given event select can be processed efficiently during
         * filtering.  The EXCLUDE flag uses a more significant bit than the
         * event select, and so the sorted list is also effectively split into
         * includes and excludes sub-lists.
         */
        sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
             filter_sort_cmp, NULL);

        i = filter->nevents;
        /* Find the first EXCLUDE event (only supported for masked events). */
        if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
                for (i = 0; i < filter->nevents; i++) {
                        if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
                                break;
                }
        }

        filter->nr_includes = i;
        filter->nr_excludes = filter->nevents - filter->nr_includes;
        filter->includes = filter->events;
        filter->excludes = filter->events + filter->nr_includes;

        return 0;
}

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
        struct kvm_pmu_event_filter __user *user_filter = argp;
        struct kvm_x86_pmu_event_filter *filter;
        struct kvm_pmu_event_filter tmp;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        size_t size;
        int r;

        if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
                return -EFAULT;

        if (tmp.action != KVM_PMU_EVENT_ALLOW &&
            tmp.action != KVM_PMU_EVENT_DENY)
                return -EINVAL;

        if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
                return -EINVAL;

        if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
                return -E2BIG;

        size = struct_size(filter, events, tmp.nevents);
        filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
        if (!filter)
                return -ENOMEM;

        filter->action = tmp.action;
        filter->nevents = tmp.nevents;
        filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
        filter->flags = tmp.flags;

        r = -EFAULT;
        if (copy_from_user(filter->events, user_filter->events,
                           sizeof(filter->events[0]) * filter->nevents))
                goto cleanup;

        r = prepare_filter_lists(filter);
        if (r)
                goto cleanup;

        mutex_lock(&kvm->lock);
        filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
                                     mutex_is_locked(&kvm->lock));
        mutex_unlock(&kvm->lock);
        synchronize_srcu_expedited(&kvm->srcu);

        BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
                     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

        kvm_for_each_vcpu(i, vcpu, kvm)
                atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

        kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

        r = 0;
cleanup:
        kfree(filter);
        return r;
}