Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--  arch/riscv/kvm/vcpu_pmu.c | 141 ++++++++++++++++++++++++++++++-----
 1 file changed, 108 insertions(+), 33 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 9ef704c2636f..86391a5061dd 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -202,12 +202,18 @@ static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u64 enabled, running;
+ int fevent_code;

pmc = &kvpmu->pmc[cidx];
- if (!pmc->perf_event)
- return -EINVAL;
- pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running);
+ if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
+ fevent_code = get_event_code(pmc->event_idx);
+ pmc->counter_val = kvpmu->fw_event[fevent_code].value;
+ } else if (pmc->perf_event) {
+ pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running);
+ } else {
+ return -EINVAL;
+ }
*out_val = pmc->counter_val;
return 0;
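
For orientation: the read path above services the guest's SBI PMU firmware-read call. Below is a minimal guest-side sketch, assuming the EID/FID encodings from the SBI PMU specification (extension 0x504D55, function 5) and a hypothetical helper name; verify the constants against <asm/sbi.h>.

#define SBI_EXT_PMU			0x504D55
#define SBI_EXT_PMU_COUNTER_FW_READ	5

/* Illustrative: read firmware counter cidx; value arrives in a1, error in a0. */
static long sbi_pmu_ctr_fw_read(unsigned long cidx, unsigned long *out_val)
{
	register unsigned long a0 asm("a0") = cidx;
	register unsigned long a1 asm("a1");
	register unsigned long a6 asm("a6") = SBI_EXT_PMU_COUNTER_FW_READ;
	register unsigned long a7 asm("a7") = SBI_EXT_PMU;

	asm volatile("ecall"
		     : "+r"(a0), "=r"(a1)
		     : "r"(a6), "r"(a7)
		     : "memory");
	*out_val = a1;
	return (long)a0;
}
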
@@ -223,6 +229,52 @@ static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ct
return 0;
}

+static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
+ unsigned long flags, unsigned long eidx, unsigned long evtdata)
+{
+ struct perf_event *event;
+
+ kvm_pmu_release_perf_event(pmc);
+ attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata);
+ if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) {
+ //TODO: Do we really want to clear the value in hardware counter
+ pmc->counter_val = 0;
+ }
+
+ /*
+ * Set the default sample_period for now. The guest specified value
+ * will be updated in the start call.
+ */
+ attr->sample_period = kvm_pmu_get_sample_period(pmc);
+
+ event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
+ if (IS_ERR(event)) {
+ pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
+ return PTR_ERR(event);
+ }
+
+ pmc->perf_event = event;
+ if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
+ perf_event_enable(pmc->perf_event);
+
+ return 0;
+}
+
+int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
+{
+ struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+ struct kvm_fw_event *fevent;
+
+ if (!kvpmu || fid >= SBI_PMU_FW_MAX)
+ return -EINVAL;
+
+ fevent = &kvpmu->fw_event[fid];
+ if (fevent->started)
+ fevent->value++;
+
+ return 0;
+}
+
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
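
kvm_riscv_vcpu_pmu_incr_fw() above is the hook the rest of KVM uses to account firmware events; its call sites live in the SBI extension handlers elsewhere in this series, not in this file. A hedged sketch of the pattern (the wrapper name is illustrative):

#include <linux/kvm_host.h>
#include <asm/kvm_vcpu_pmu.h>
#include <asm/sbi.h>

static void record_set_timer_event(struct kvm_vcpu *vcpu)
{
	/* Increments only if the guest configured and started a
	 * SBI_PMU_FW_SET_TIMER firmware counter; otherwise harmless. */
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
}
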
@@ -290,6 +342,7 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
int i, pmc_index, sbiret = 0;
struct kvm_pmc *pmc;
+ int fevent_code;

if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
@@ -304,7 +357,22 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
pmc = &kvpmu->pmc[pmc_index];
if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
pmc->counter_val = ival;
- if (pmc->perf_event) {
+ if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
+ fevent_code = get_event_code(pmc->event_idx);
+ if (fevent_code >= SBI_PMU_FW_MAX) {
+ sbiret = SBI_ERR_INVALID_PARAM;
+ goto out;
+ }
+
+ /* Check if the counter was already started for some reason */
+ if (kvpmu->fw_event[fevent_code].started) {
+ sbiret = SBI_ERR_ALREADY_STARTED;
+ continue;
+ }
+
+ kvpmu->fw_event[fevent_code].started = true;
+ kvpmu->fw_event[fevent_code].value = pmc->counter_val;
+ } else if (pmc->perf_event) {
if (unlikely(pmc->started)) {
sbiret = SBI_ERR_ALREADY_STARTED;
continue;
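
Note how the firmware branch seeds fw_event.value from pmc->counter_val, so a START with SBI_PMU_START_FLAG_SET_INIT_VALUE gives the guest a well-defined base to count from. A self-contained user-space model of just that semantics (illustrative types, not the kernel structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pmc_model { uint64_t counter_val; };
struct fw_event_model { uint64_t value; bool started; };

static int ctr_start_model(struct pmc_model *pmc, struct fw_event_model *ev,
			   bool set_init, uint64_t ival)
{
	if (ev->started)
		return -1;		/* models SBI_ERR_ALREADY_STARTED */
	if (set_init)
		pmc->counter_val = ival;
	ev->started = true;
	ev->value = pmc->counter_val;	/* increments build on the guest base */
	return 0;
}

int main(void)
{
	struct pmc_model pmc = { 0 };
	struct fw_event_model ev = { 0 };

	ctr_start_model(&pmc, &ev, true, 100);
	if (ev.started)			/* mirrors kvm_riscv_vcpu_pmu_incr_fw() */
		ev.value++;
	printf("count = %llu\n", (unsigned long long)ev.value); /* prints 101 */
	return 0;
}
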
@@ -331,6 +399,7 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
int i, pmc_index, sbiret = 0;
u64 enabled, running;
struct kvm_pmc *pmc;
+ int fevent_code;

if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
@@ -343,7 +412,18 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
pmc = &kvpmu->pmc[pmc_index];
- if (pmc->perf_event) {
+ if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
+ fevent_code = get_event_code(pmc->event_idx);
+ if (fevent_code >= SBI_PMU_FW_MAX) {
+ sbiret = SBI_ERR_INVALID_PARAM;
+ goto out;
+ }
+
+ if (!kvpmu->fw_event[fevent_code].started)
+ sbiret = SBI_ERR_ALREADY_STOPPED;
+
+ kvpmu->fw_event[fevent_code].started = false;
+ } else if (pmc->perf_event) {
if (pmc->started) {
/* Stop counting the counter */
perf_event_disable(pmc->perf_event);
@@ -357,11 +437,14 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
pmc->counter_val += perf_event_read_value(pmc->perf_event,
&enabled, &running);
kvm_pmu_release_perf_event(pmc);
- clear_bit(pmc_index, kvpmu->pmc_in_use);
}
} else {
sbiret = SBI_ERR_INVALID_PARAM;
}
+ if (flags & SBI_PMU_STOP_FLAG_RESET) {
+ pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
+ clear_bit(pmc_index, kvpmu->pmc_in_use);
+ }
}
out:
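
A behavioural change worth noting in the stop path: the counter is now released (pmc_in_use cleared, event_idx invalidated) only when the guest passes SBI_PMU_STOP_FLAG_RESET, rather than on every perf-event teardown. A guest-side sketch of a stop-with-reset call, again assuming the spec's encodings (function 4, flag bit 0) and a hypothetical helper name:

#define SBI_EXT_PMU			0x504D55
#define SBI_EXT_PMU_COUNTER_STOP	4
#define SBI_PMU_STOP_FLAG_RESET		(1UL << 0)

/* Illustrative: stop counter cidx and free it for a later CFG_MATCH. */
static long sbi_pmu_ctr_stop_reset(unsigned long cidx)
{
	register unsigned long a0 asm("a0") = cidx;	/* counter_idx_base */
	register unsigned long a1 asm("a1") = 1;	/* counter_idx_mask */
	register unsigned long a2 asm("a2") = SBI_PMU_STOP_FLAG_RESET;
	register unsigned long a6 asm("a6") = SBI_EXT_PMU_COUNTER_STOP;
	register unsigned long a7 asm("a7") = SBI_EXT_PMU;

	asm volatile("ecall"
		     : "+r"(a0), "+r"(a1)
		     : "r"(a2), "r"(a6), "r"(a7)
		     : "memory");
	return (long)a0;	/* SBI error code, 0 on success */
}
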
@@ -375,12 +458,12 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
unsigned long eidx, u64 evtdata,
struct kvm_vcpu_sbi_return *retdata)
{
- int ctr_idx, sbiret = 0;
- u64 config;
+ int ctr_idx, ret, sbiret = 0;
+ bool is_fevent;
+ unsigned long event_code;
u32 etype = kvm_pmu_get_perf_event_type(eidx);
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
- struct perf_event *event;
- struct kvm_pmc *pmc;
+ struct kvm_pmc *pmc = NULL;
struct perf_event_attr attr = {
.type = etype,
.size = sizeof(struct perf_event_attr),
@@ -401,7 +484,9 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
goto out;
}
- if (kvm_pmu_is_fw_event(eidx)) {
+ event_code = get_event_code(eidx);
+ is_fevent = kvm_pmu_is_fw_event(eidx);
+ if (is_fevent && event_code >= SBI_PMU_FW_MAX) {
sbiret = SBI_ERR_NOT_SUPPORTED;
goto out;
}
@@ -425,33 +510,19 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
}
pmc = &kvpmu->pmc[ctr_idx];
- kvm_pmu_release_perf_event(pmc);
pmc->idx = ctr_idx;
- config = kvm_pmu_get_perf_event_config(eidx, evtdata);
- attr.config = config;
- if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) {
- //TODO: Do we really want to clear the value in hardware counter
- pmc->counter_val = 0;
- }
-
- /*
- * Set the default sample_period for now. The guest specified value
- * will be updated in the start call.
- */
- attr.sample_period = kvm_pmu_get_sample_period(pmc);
-
- event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
- if (IS_ERR(event)) {
- pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
- return PTR_ERR(event);
+ if (is_fevent) {
+ if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
+ kvpmu->fw_event[event_code].started = true;
+ } else {
+ ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
+ if (ret)
+ return ret;
}
set_bit(ctr_idx, kvpmu->pmc_in_use);
- pmc->perf_event = event;
- if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
- perf_event_enable(pmc->perf_event);
-
+ pmc->event_idx = eidx;
retdata->out_val = ctr_idx;
out:
retdata->err_val = sbiret;
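
The perf-event creation factored into kvm_pmu_create_perf_event() keeps the established perf pattern: a task-bound kernel counter created disabled and enabled only when SBI_PMU_CFG_FLAG_AUTO_START is set. A condensed sketch of that pattern (the attribute values shown are illustrative, not the exact set cfg_match uses):

#include <linux/perf_event.h>
#include <linux/sched.h>

static struct perf_event *create_guest_counter(u32 type, u64 config,
					       u64 sample_period)
{
	struct perf_event_attr attr = {
		.type		= type,
		.size		= sizeof(attr),
		.config		= config,
		.sample_period	= sample_period,
		.disabled	= 1,	/* armed later via perf_event_enable() */
	};

	/* cpu == -1 binds the event to the task rather than to a CPU. */
	return perf_event_create_kernel_counter(&attr, -1, current, NULL, NULL);
}
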
@@ -494,6 +565,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
*/
kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
+ memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA");
@@ -512,6 +584,7 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
continue;
pmc = &kvpmu->pmc[i];
pmc->idx = i;
+ pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
if (i < kvpmu->num_hw_ctrs) {
pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
if (i < 3)
@@ -548,8 +621,10 @@ void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
pmc = &kvpmu->pmc[i];
pmc->counter_val = 0;
kvm_pmu_release_perf_event(pmc);
+ pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
}
bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
+ memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
}
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)