Lines matching "pmu" (KVM Intel vPMU, pmu_intel.c)

3  * KVM PMU support for Intel CPUs
21 #include "pmu.h"
37 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
40 u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
43 pmu->fixed_ctr_ctrl = data;
44 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
51 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
53 __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
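
The fragment above reacts to a guest write of IA32_FIXED_CTR_CTRL: each fixed counter owns a 4-bit control field, and only counters whose field changed need reprogramming. A minimal userspace sketch of that decode, with a hypothetical reprogram_one() hook standing in for KVM's reprogramming path:

    #include <stdint.h>

    /* Fixed counter i is controlled by bits [4*i+3:4*i] of
     * IA32_FIXED_CTR_CTRL: bit 0 enables ring-0 counting, bit 1
     * ring-3 counting, bit 2 any-thread, bit 3 PMI on overflow. */
    static inline uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
    {
        return (ctrl >> (idx * 4)) & 0xf;
    }

    /* Mirror the old-vs-new comparison: reprogram only the counters
     * whose control field actually changed. */
    static void reprogram_changed(uint64_t old_ctrl, uint64_t new_ctrl,
                                  int nr_fixed, void (*reprogram_one)(int))
    {
        for (int i = 0; i < nr_fixed; i++)
            if (fixed_ctrl_field(old_ctrl, i) != fixed_ctrl_field(new_ctrl, i))
                reprogram_one(i);
    }
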
62 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
74 * Yell and reject attempts to read PMCs for a non-architectural PMU,
77 if (WARN_ON_ONCE(!pmu->version))
83 * supported by KVM. Note, KVM only emulates fixed PMCs for PMU v2+,
90 counters = pmu->fixed_counters;
91 num_counters = pmu->nr_arch_fixed_counters;
92 bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
95 counters = pmu->gp_counters;
96 num_counters = pmu->nr_arch_gp_counters;
97 bitmask = pmu->counter_bitmask[KVM_PMC_GP];
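
intel_rdpmc_ecx_to_pmc() is selecting between the two counter sets: RDPMC's ECX operand uses bit 30 to pick fixed versus general-purpose counters, with the index in the low bits. A simplified sketch of that split (the real code also masks reserved ECX bits and applies the per-type width bitmask to the returned value):

    #include <stdbool.h>
    #include <stdint.h>

    #define RDPMC_FIXED (1u << 30) /* ECX bit 30 selects the fixed-counter set */

    static bool rdpmc_decode(uint32_t ecx, uint32_t nr_gp, uint32_t nr_fixed,
                             bool *is_fixed, uint32_t *idx)
    {
        *is_fixed = ecx & RDPMC_FIXED;
        *idx = ecx & ~RDPMC_FIXED;
        /* Bound-check against the size of the selected set. */
        return *idx < (*is_fixed ? nr_fixed : nr_gp);
    }
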
124 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
126 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
129 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
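
get_fw_gp_pmc() resolves the full-width counter aliases: MSR_IA32_PMC0 (0x4c1) addresses the same GP counters as the legacy MSR_IA32_PERFCTR0 (0xc1) range, but writes through the alias set all implemented bits instead of sign-extending from bit 31. A sketch of the base-plus-index mapping, with a hypothetical helper name:

    #include <stdint.h>

    #define MSR_IA32_PERFCTR0 0xc1u  /* legacy: writes sign-extended from bit 31 */
    #define MSR_IA32_PMC0     0x4c1u /* full-width alias */

    /* Map either MSR base to a GP counter index, or -1 when the MSR
     * falls outside the counter range. */
    static int gp_index_from_msr(uint32_t msr, uint32_t base, uint32_t nr_gp)
    {
        return (msr >= base && msr < base + nr_gp) ? (int)(msr - base) : -1;
    }
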
152 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
158 return kvm_pmu_has_perf_global_ctrl(pmu);
171 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
172 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
173 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
183 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
186 pmc = get_fixed_pmc(pmu, msr);
187 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
188 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
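
Both fragments above implement a first-match fall-through across the MSR namespaces (fixed counter MSRs, event selectors, then GP counters). A standalone restatement of the pattern, with hypothetical lookup callbacks in place of KVM's helpers:

    #include <stddef.h>
    #include <stdint.h>

    struct pmc_stub;

    /* Try each MSR namespace in turn; the first hit wins, NULL means
     * the MSR names no counter at all. */
    static struct pmc_stub *msr_to_pmc(uint32_t msr,
                                       struct pmc_stub *(*get_fixed)(uint32_t),
                                       struct pmc_stub *(*get_evtsel)(uint32_t),
                                       struct pmc_stub *(*get_ctr)(uint32_t))
    {
        struct pmc_stub *pmc = get_fixed(msr);
        pmc = pmc ? pmc : get_evtsel(msr);
        return pmc ? pmc : get_ctr(msr);
    }
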
207 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
239 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
251 pmu->event_count++;
252 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
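
intel_pmu_create_guest_lbr_event() creates a perf event that claims the LBR facility on the guest's behalf, then marks the virtual LBR "counter" in pmc_in_use. KVM uses the in-kernel perf API for this; as a rough userspace analogue only, this is how a branch-stack (LBR-backed) event is requested through perf_event_open:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Userspace analogue: ask perf for LBR-backed branch-stack samples
     * on a cycles event. Not KVM's code path, just the same facility. */
    static int open_branch_stack_event(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
        attr.exclude_kernel = 1;

        return (int)syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    }
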
300 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
306 msr_info->data = pmu->fixed_ctr_ctrl;
309 msr_info->data = pmu->pebs_enable;
312 msr_info->data = pmu->ds_area;
315 msr_info->data = pmu->pebs_data_cfg;
318 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
319 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
322 val & pmu->counter_bitmask[KVM_PMC_GP];
324 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
327 val & pmu->counter_bitmask[KVM_PMC_FIXED];
329 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
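
Note how every counter read in intel_pmu_get_msr() is masked with counter_bitmask[]: the guest only ever sees the emulated bit width, regardless of what the backing 64-bit value holds. The mask itself comes from CPUID's bit_width fields (see intel_pmu_refresh() below). A one-function sketch:

    #include <stdint.h>

    /* Truncate the backing counter to the guest-visible width; the mask
     * is (1 << bit_width) - 1, as computed during PMU refresh. */
    static uint64_t read_counter_masked(uint64_t raw, unsigned int bit_width)
    {
        uint64_t mask = (bit_width >= 64) ? ~0ull : ((1ull << bit_width) - 1);
        return raw & mask;
    }
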
343 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
351 if (data & pmu->fixed_ctr_ctrl_rsvd)
354 if (pmu->fixed_ctr_ctrl != data)
355 reprogram_fixed_counters(pmu, data);
358 if (data & pmu->pebs_enable_rsvd)
361 if (pmu->pebs_enable != data) {
362 diff = pmu->pebs_enable ^ data;
363 pmu->pebs_enable = data;
364 reprogram_counters(pmu, diff);
371 pmu->ds_area = data;
374 if (data & pmu->pebs_data_cfg_rsvd)
377 pmu->pebs_data_cfg = data;
380 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
381 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
383 (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
391 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
394 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
395 reserved_bits = pmu->reserved_bits;
397 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
410 /* Not a known PMU MSR. */
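
The write path follows one pattern throughout: reject any write that sets reserved bits, then reprogram only on an actual change, using an XOR diff (as in the PEBS_ENABLE case at lines 361-364) so only toggled counters are touched. A sketch of that guard-then-diff shape:

    #include <stdint.h>

    /* Reserved-bits-then-diff pattern: a reserved-bit hit returns an
     * error (a #GP injection in the real code); otherwise reprogram
     * only the counters whose enable bit actually flipped. */
    static int write_guarded(uint64_t *cur, uint64_t data, uint64_t rsvd,
                             void (*reprogram)(uint64_t changed))
    {
        if (data & rsvd)
            return -1;
        if (*cur != data) {
            uint64_t diff = *cur ^ data;
            *cur = data;
            reprogram(diff);
        }
        return 0;
    }
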
451 static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
455 for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
456 pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
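
Here "enabling" bits means clearing them from the reserved mask: once a bit is no longer in fixed_ctr_ctrl_rsvd, guest writes to it pass the reserved-bit check above. A sketch assuming intel_fixed_bits_by_idx() shifts a 4-bit control template into counter idx's field (stride of 4 bits, which matches the IA32_FIXED_CTR_CTRL layout):

    #include <stdint.h>

    /* Assumed semantics of intel_fixed_bits_by_idx(). */
    static inline uint64_t fixed_bits_by_idx(int idx, uint64_t bits)
    {
        return bits << (idx * 4);
    }

    /* Mark the given control bits writable for every fixed counter. */
    static void enable_fixed_bits(uint64_t *rsvd, int nr_fixed, uint64_t bits)
    {
        for (int i = 0; i < nr_fixed; i++)
            *rsvd &= ~fixed_bits_by_idx(i, bits);
    }
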
461 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
473 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
486 pmu->version = eax.split.version_id;
487 if (!pmu->version)
490 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
494 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
497 pmu->available_event_types = ~entry->ebx &
500 if (pmu->version == 1) {
501 pmu->nr_arch_fixed_counters = 0;
503 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
507 pmu->counter_bitmask[KVM_PMC_FIXED] =
511 intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
515 counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
516 (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
517 pmu->global_ctrl_rsvd = counter_rsvd;
524 pmu->global_status_rsvd = pmu->global_ctrl_rsvd
528 pmu->global_status_rsvd &=
535 pmu->reserved_bits ^= HSW_IN_TX;
536 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
539 bitmap_set(pmu->all_valid_pmc_idx,
540 0, pmu->nr_arch_gp_counters);
541 bitmap_set(pmu->all_valid_pmc_idx,
542 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
552 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
556 pmu->pebs_enable_rsvd = counter_rsvd;
557 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
558 pmu->pebs_data_cfg_rsvd = ~0xff00000full;
559 intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
561 pmu->pebs_enable_rsvd =
562 ~((1ull << pmu->nr_arch_gp_counters) - 1);
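
The counter_rsvd computation at lines 515-516 encodes GLOBAL_CTRL's architectural layout: GP counter enables occupy the low bits, fixed counter enables start at bit 32 (KVM_FIXED_PMC_BASE_IDX), and everything else is reserved. A compilable worked example of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_BASE_IDX 32 /* fixed counter enables start at bit 32 */

    /* A GLOBAL_CTRL bit is reserved unless it maps to an existing GP
     * or fixed counter. */
    static uint64_t global_ctrl_rsvd(int nr_gp, int nr_fixed)
    {
        return ~(((1ull << nr_gp) - 1) |
                 (((1ull << nr_fixed) - 1) << FIXED_BASE_IDX));
    }

    int main(void)
    {
        /* 8 GP + 3 fixed counters: valid mask 0x00000007000000ff,
         * so rsvd prints 0xfffffff8ffffff00. */
        printf("rsvd = 0x%016llx\n",
               (unsigned long long)global_ctrl_rsvd(8, 3));
        return 0;
    }
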
570 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
574 pmu->gp_counters[i].type = KVM_PMC_GP;
575 pmu->gp_counters[i].vcpu = vcpu;
576 pmu->gp_counters[i].idx = i;
577 pmu->gp_counters[i].current_config = 0;
581 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
582 pmu->fixed_counters[i].vcpu = vcpu;
583 pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
584 pmu->fixed_counters[i].current_config = 0;
585 pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
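
intel_pmu_init() assigns each fixed counter the global index KVM_FIXED_PMC_BASE_IDX + i, keeping the idx field consistent with the GLOBAL_CTRL bit layout shown above. An illustrative stub (the struct here is not kvm_pmc, just a mirror of the fields being set):

    #include <stdint.h>

    struct pmc_stub {
        int type;          /* GP or FIXED */
        int idx;           /* global index: GP i -> i, fixed i -> 32 + i */
        uint64_t eventsel;
    };

    static void init_fixed(struct pmc_stub *fixed, int nr_fixed)
    {
        for (int i = 0; i < nr_fixed; i++) {
            fixed[i].type = 1;      /* stands in for KVM_PMC_FIXED */
            fixed[i].idx = 32 + i;  /* KVM_FIXED_PMC_BASE_IDX + i */
            fixed[i].eventsel = 0;  /* KVM fills this from its event table */
        }
    }
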
599 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
667 * pmu resources (e.g. LBR) that were assigned to the guest. This is
671 * confirm that the pmu features enabled to the guest are not reclaimed
677 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
684 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
691 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
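
The VLBR bit in pmc_in_use acts as a use-it-or-lose-it flag: MSR accesses set it, the periodic cleanup clears it, and the LBRs are only kept passed through while it stays set between checks. A minimal sketch of that handshake on a plain bitmask:

    #include <stdbool.h>
    #include <stdint.h>

    /* Report whether the bit was set since the last poll, and clear it
     * so the user must re-assert activity before the next poll. */
    static bool poll_and_clear(uint64_t *in_use, int bit)
    {
        bool was_set = (*in_use >> bit) & 1;
        *in_use &= ~(1ull << bit);
        return was_set;
    }
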
708 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
713 kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
724 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
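
intel_pmu_cross_mapped_check() walks the counters enabled in the guest's GLOBAL_CTRL and records those whose guest index differs from the hardware counter perf actually assigned. A sketch of the same walk, with hw_idx[] standing in for pmc->perf_event->hw.idx:

    #include <stdint.h>

    /* Collect the host counters backing "cross-mapped" guest counters:
     * enabled in global_ctrl, scheduled by perf onto a different index. */
    static uint64_t cross_mapped_mask(uint64_t global_ctrl,
                                      const int *hw_idx, int nr_bits)
    {
        uint64_t mask = 0;

        for (int bit = 0; bit < nr_bits; bit++) {
            if (!(global_ctrl >> bit & 1))
                continue;
            if (hw_idx[bit] >= 0 && hw_idx[bit] != bit)
                mask |= 1ull << hw_idx[bit];
        }
        return mask;
    }
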