Revert "KVM: arm64: PMU: Distinguish between 64bit counter and 64bit overflow"

This reverts commit 508d71a677.

It breaks the current Android kernel ABI and will be brought back at
the next KABI break update.

Bug: 161946584
Change-Id: I1d646f16b36447aad59225c7a8a5e7bab1ca33f8
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 7ddb50fff2 (parent a8fa816c4d)
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2023-04-23 08:14:39 +00:00
Committer: Matthias Männich

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c

@@ -50,11 +50,6 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
  * @select_idx: The counter index
  */
 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
-{
-	return (select_idx == ARMV8_PMU_CYCLE_IDX);
-}
-
-static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
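A quick illustration of what this hunk restores: "64bit counter" and "64bit overflow" collapse back into a single predicate, true only for the cycle counter when PMCR_EL0.LC (long cycle count) is set. A minimal userspace sketch of the merged check, with a plain pmcr value standing in for __vcpu_sys_reg(vcpu, PMCR_EL0) and constants mirroring the kernel headers:

#include <stdbool.h>
#include <stdint.h>

#define ARMV8_PMU_CYCLE_IDX	31		/* cycle counter index */
#define ARMV8_PMU_PMCR_LC	(1 << 6)	/* PMCR_EL0.LC, long cycle count */

/* pmcr stands in for __vcpu_sys_reg(vcpu, PMCR_EL0) */
static bool pmu_idx_is_64bit(uint64_t pmcr, uint64_t select_idx)
{
	return select_idx == ARMV8_PMU_CYCLE_IDX &&
	       (pmcr & ARMV8_PMU_PMCR_LC);
}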
@@ -62,8 +57,7 @@ static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx
 
 static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
 {
-	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
-		!kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
+	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX);
 }
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
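The chaining rule likewise loses its overflow qualifier: after the revert, any even-numbered event counter whose odd neighbour sits below the cycle counter is considered chainable, regardless of overflow width. A standalone sketch of the restored predicate (ARMV8_PMU_CYCLE_IDX is 31 in the kernel headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX 31

/* Restored rule: even index, odd neighbour still an event counter */
static bool counter_can_chain(uint64_t idx)
{
	return !(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX;
}

int main(void)
{
	printf("%d %d %d\n",
	       counter_can_chain(0),   /* 1: low half of the 0/1 pair */
	       counter_can_chain(5),   /* 0: odd indices are the high half */
	       counter_can_chain(30)); /* 0: index 31 is the cycle counter */
	return 0;
}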
@@ -103,7 +97,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 		counter += perf_event_read_value(pmc->perf_event, &enabled,
 						 &running);
 
-	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	if (select_idx != ARMV8_PMU_CYCLE_IDX)
 		counter = lower_32_bits(counter);
 
 	return counter;
@@ -429,23 +423,6 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 	}
 }
 
-/* Compute the sample period for a given counter value */
-static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
-{
-	u64 val;
-
-	if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
-		if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
-			val = -(counter & GENMASK(31, 0));
-		else
-			val = (-counter) & GENMASK(63, 0);
-	} else {
-		val = (-counter) & GENMASK(31, 0);
-	}
-
-	return val;
-}
-
 /**
  * When the perf event overflows, set the overflow status and inform the vcpu.
  */
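The deleted compute_period() helper is the heart of the reverted commit: the sample period handed to perf is the distance to the next overflow, i.e. the two's-complement negation of the counter masked to the overflow width. A standalone sketch of that arithmetic, with bools standing in for kvm_pmu_idx_is_64bit()/kvm_pmu_idx_has_64bit_overflow() and a local GENMASK_ULL replicating the kernel macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t compute_period(bool counter_64bit, bool overflow_64bit,
			       uint64_t counter)
{
	if (counter_64bit) {
		if (!overflow_64bit)
			/* 64bit counter, 32bit overflow: negate only
			 * the low 32 bits, keeping a 64bit result */
			return -(counter & GENMASK_ULL(31, 0));
		/* 64bit counter and overflow: full-width negation */
		return (-counter) & GENMASK_ULL(63, 0);
	}
	/* 32bit counter: 32bit distance to overflow */
	return (-counter) & GENMASK_ULL(31, 0);
}

int main(void)
{
	/* 0x10 events to go before a 32bit counter wraps */
	printf("%#llx\n", (unsigned long long)
	       compute_period(false, false, 0xFFFFFFF0ULL));
	return 0;
}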
@@ -465,7 +442,10 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	 * Reset the sample period to the architectural limit,
 	 * i.e. the point where the counter overflows.
 	 */
-	period = compute_period(vcpu, idx, local64_read(&perf_event->count));
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
 
 	local64_set(&perf_event->hw.period_left, 0);
 	perf_event->attr.sample_period = period;
@@ -591,13 +571,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	/*
 	 * If counting with a 64bit counter, advertise it to the perf
-	 * code, carefully dealing with the initial sample period
-	 * which also depends on the overflow.
+	 * code, carefully dealing with the initial sample period.
 	 */
-	if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
 		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
-
-	attr.sample_period = compute_period(vcpu, select_idx, counter);
+		attr.sample_period = (-counter) & GENMASK(63, 0);
+	} else {
+		attr.sample_period = (-counter) & GENMASK(31, 0);
+	}
 
 	event = perf_event_create_kernel_counter(&attr, -1, current,
 						 kvm_pmu_perf_overflow, pmc);