1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Performance monitoring support for Virtual Processor Area(VPA) based counters
4 *
5 * Copyright (C) 2024 IBM Corporation
6 */
7 #define pr_fmt(fmt) "vpa_pmu: " fmt
8
9 #include <linux/module.h>
10 #include <linux/perf_event.h>
11 #include <asm/kvm_ppc.h>
12 #include <asm/kvm_book3s_64.h>
13
14 #define MODULE_VERS "1.0"
15 #define MODULE_NAME "pseries_vpa_pmu"
16
17 #define EVENT(_name, _code) enum{_name = _code}
18
19 #define VPA_PMU_EVENT_VAR(_id) event_attr_##_id
20 #define VPA_PMU_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
21
vpa_pmu_events_sysfs_show(struct device * dev,struct device_attribute * attr,char * page)22 static ssize_t vpa_pmu_events_sysfs_show(struct device *dev,
23 struct device_attribute *attr, char *page)
24 {
25 struct perf_pmu_events_attr *pmu_attr;
26
27 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
28
29 return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
30 }
31
32 #define VPA_PMU_EVENT_ATTR(_name, _id) \
33 PMU_EVENT_ATTR(_name, VPA_PMU_EVENT_VAR(_id), _id, \
34 vpa_pmu_events_sysfs_show)
35
36 EVENT(L1_TO_L2_CS_LAT, 0x1);
37 EVENT(L2_TO_L1_CS_LAT, 0x2);
38 EVENT(L2_RUNTIME_AGG, 0x3);
39
40 VPA_PMU_EVENT_ATTR(l1_to_l2_lat, L1_TO_L2_CS_LAT);
41 VPA_PMU_EVENT_ATTR(l2_to_l1_lat, L2_TO_L1_CS_LAT);
42 VPA_PMU_EVENT_ATTR(l2_runtime_agg, L2_RUNTIME_AGG);
43
/* Event attributes exported via the "events" sysfs group below */
static struct attribute *vpa_pmu_events_attr[] = {
	VPA_PMU_EVENT_PTR(L1_TO_L2_CS_LAT),
	VPA_PMU_EVENT_PTR(L2_TO_L1_CS_LAT),
	VPA_PMU_EVENT_PTR(L2_RUNTIME_AGG),
	NULL
};

/* "events" group: named event -> config mappings consumed by perf tool */
static const struct attribute_group vpa_pmu_events_group = {
	.name = "events",
	.attrs = vpa_pmu_events_attr,
};
55
56 PMU_FORMAT_ATTR(event, "config:0-31");
57 static struct attribute *vpa_pmu_format_attr[] = {
58 &format_attr_event.attr,
59 NULL,
60 };
61
62 static struct attribute_group vpa_pmu_format_group = {
63 .name = "format",
64 .attrs = vpa_pmu_format_attr,
65 };
66
/* All sysfs attribute groups for this PMU (NULL-terminated) */
static const struct attribute_group *vpa_pmu_attr_groups[] = {
	&vpa_pmu_events_group,
	&vpa_pmu_format_group,
	NULL
};
72
vpa_pmu_event_init(struct perf_event * event)73 static int vpa_pmu_event_init(struct perf_event *event)
74 {
75 if (event->attr.type != event->pmu->type)
76 return -ENOENT;
77
78 /* it does not support event sampling mode */
79 if (is_sampling_event(event))
80 return -EOPNOTSUPP;
81
82 /* no branch sampling */
83 if (has_branch_stack(event))
84 return -EOPNOTSUPP;
85
86 /* Invalid event code */
87 if ((event->attr.config <= 0) || (event->attr.config > 3))
88 return -EINVAL;
89
90 return 0;
91 }
92
get_counter_data(struct perf_event * event)93 static unsigned long get_counter_data(struct perf_event *event)
94 {
95 unsigned int config = event->attr.config;
96 u64 data;
97
98 switch (config) {
99 case L1_TO_L2_CS_LAT:
100 if (event->attach_state & PERF_ATTACH_TASK)
101 data = kvmhv_get_l1_to_l2_cs_time_vcpu();
102 else
103 data = kvmhv_get_l1_to_l2_cs_time();
104 break;
105 case L2_TO_L1_CS_LAT:
106 if (event->attach_state & PERF_ATTACH_TASK)
107 data = kvmhv_get_l2_to_l1_cs_time_vcpu();
108 else
109 data = kvmhv_get_l2_to_l1_cs_time();
110 break;
111 case L2_RUNTIME_AGG:
112 if (event->attach_state & PERF_ATTACH_TASK)
113 data = kvmhv_get_l2_runtime_agg_vcpu();
114 else
115 data = kvmhv_get_l2_runtime_agg();
116 break;
117 default:
118 data = 0;
119 break;
120 }
121
122 return data;
123 }
124
vpa_pmu_add(struct perf_event * event,int flags)125 static int vpa_pmu_add(struct perf_event *event, int flags)
126 {
127 u64 data;
128
129 kvmhv_set_l2_counters_status(smp_processor_id(), true);
130
131 data = get_counter_data(event);
132 local64_set(&event->hw.prev_count, data);
133
134 return 0;
135 }
136
vpa_pmu_read(struct perf_event * event)137 static void vpa_pmu_read(struct perf_event *event)
138 {
139 u64 prev_data, new_data, final_data;
140
141 prev_data = local64_read(&event->hw.prev_count);
142 new_data = get_counter_data(event);
143 final_data = new_data - prev_data;
144
145 local64_add(final_data, &event->count);
146 }
147
vpa_pmu_del(struct perf_event * event,int flags)148 static void vpa_pmu_del(struct perf_event *event, int flags)
149 {
150 vpa_pmu_read(event);
151
152 /*
153 * Disable vpa counter accumulation
154 */
155 kvmhv_set_l2_counters_status(smp_processor_id(), false);
156 }
157
/*
 * PMU descriptor. perf_sw_context: software-style counters with no
 * sampling interrupt (PERF_PMU_CAP_NO_INTERRUPT) and no exclude_*
 * support (PERF_PMU_CAP_NO_EXCLUDE).
 */
static struct pmu vpa_pmu = {
	.module = THIS_MODULE,
	.task_ctx_nr = perf_sw_context,
	.name = "vpa_pmu",
	.event_init = vpa_pmu_event_init,
	.add = vpa_pmu_add,
	.del = vpa_pmu_del,
	.read = vpa_pmu_read,
	.attr_groups = vpa_pmu_attr_groups,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
169
pseries_vpa_pmu_init(void)170 static int __init pseries_vpa_pmu_init(void)
171 {
172 /*
173 * List of current Linux on Power platforms and
174 * this driver is supported only in PowerVM LPAR
175 * (L1) platform.
176 *
177 * Enabled Linux on Power Platforms
178 * ----------------------------------------
179 * [X] PowerVM LPAR (L1)
180 * [ ] KVM Guest On PowerVM KoP(L2)
181 * [ ] Baremetal(PowerNV)
182 * [ ] KVM Guest On PowerNV
183 */
184 if (!firmware_has_feature(FW_FEATURE_LPAR) || is_kvm_guest())
185 return -ENODEV;
186
187 perf_pmu_register(&vpa_pmu, vpa_pmu.name, -1);
188 pr_info("Virtual Processor Area PMU registered.\n");
189
190 return 0;
191 }
192
/* Module exit: unregister the PMU from the perf core. */
static void __exit pseries_vpa_pmu_cleanup(void)
{
	perf_pmu_unregister(&vpa_pmu);
	pr_info("Virtual Processor Area PMU unregistered.\n");
}
198
199 module_init(pseries_vpa_pmu_init);
200 module_exit(pseries_vpa_pmu_cleanup);
201 MODULE_DESCRIPTION("Perf Driver for pSeries VPA pmu counter");
202 MODULE_AUTHOR("Kajol Jain <[email protected]>");
203 MODULE_AUTHOR("Madhavan Srinivasan <[email protected]>");
204 MODULE_LICENSE("GPL");
205