1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * HiSilicon SoC DDRC uncore Hardware event counters support
4  *
5  * Copyright (C) 2017 HiSilicon Limited
6  * Author: Shaokun Zhang <[email protected]>
7  *         Anurup M <[email protected]>
8  *
9  * This code is based on the uncore PMUs like arm-cci and arm-ccn.
10  */
11 #include <linux/acpi.h>
12 #include <linux/bug.h>
13 #include <linux/cpuhotplug.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/list.h>
17 #include <linux/smp.h>
18 
19 #include "hisi_uncore_pmu.h"
20 
21 /* DDRC register definition in v1 */
22 #define DDRC_PERF_CTRL		0x010
23 #define DDRC_FLUX_WR		0x380
24 #define DDRC_FLUX_RD		0x384
25 #define DDRC_FLUX_WCMD          0x388
26 #define DDRC_FLUX_RCMD          0x38c
27 #define DDRC_PRE_CMD            0x3c0
28 #define DDRC_ACT_CMD            0x3c4
29 #define DDRC_RNK_CHG            0x3cc
30 #define DDRC_RW_CHG             0x3d0
31 #define DDRC_EVENT_CTRL         0x6C0
32 #define DDRC_INT_MASK		0x6c8
33 #define DDRC_INT_STATUS		0x6cc
34 #define DDRC_INT_CLEAR		0x6d0
35 #define DDRC_VERSION		0x710
36 
37 /* DDRC register definition in v2 */
38 #define DDRC_V2_INT_MASK	0x528
39 #define DDRC_V2_INT_STATUS	0x52c
40 #define DDRC_V2_INT_CLEAR	0x530
41 #define DDRC_V2_EVENT_CNT	0xe00
42 #define DDRC_V2_EVENT_CTRL	0xe70
43 #define DDRC_V2_EVENT_TYPE	0xe74
44 #define DDRC_V2_PERF_CTRL	0xeA0
45 
46 /* DDRC has 8-counters */
47 #define DDRC_NR_COUNTERS	0x8
48 #define DDRC_V1_PERF_CTRL_EN	0x2
49 #define DDRC_V2_PERF_CTRL_EN	0x1
50 #define DDRC_V1_NR_EVENTS	0x7
51 #define DDRC_V2_NR_EVENTS	0x90
52 
53 /*
54  * For PMU v1, there are eight-events and every event has been mapped
55  * to fixed-purpose counters which register offset is not consistent.
56  * Therefore there is no write event type and we assume that event
57  * code (0 to 7) is equal to counter index in PMU driver.
58  */
59 #define GET_DDRC_EVENTID(hwc)	(hwc->config_base & 0x7)
60 
/*
 * Per-event statistics register offsets for PMU v1, indexed by the
 * event code (see GET_DDRC_EVENTID).
 */
static const u32 ddrc_reg_off[] = {
	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};
65 
/*
 * Select the counter register offset using the counter index.
 * In PMU v1 there are no programmable counters; the count is
 * read from the fixed-purpose statistics register itself.
 */
static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
{
	return ddrc_reg_off[cntr_idx];
}
75 
/* v2 counters are 8 bytes apart, laid out from DDRC_V2_EVENT_CNT */
static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
{
	return DDRC_V2_EVENT_CNT + cntr_idx * 8;
}
80 
hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)81 static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
82 				      struct hw_perf_event *hwc)
83 {
84 	return readl(ddrc_pmu->base +
85 		     hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
86 }
87 
static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
					struct hw_perf_event *hwc, u64 val)
{
	u32 off = hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx);

	/* Only the low 32 bits fit in a v1 statistics register */
	writel((u32)val, ddrc_pmu->base + off);
}
94 
hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)95 static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
96 					 struct hw_perf_event *hwc)
97 {
98 	return readq(ddrc_pmu->base +
99 		     hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
100 }
101 
static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
					   struct hw_perf_event *hwc, u64 val)
{
	u32 off = hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx);

	/* v2 event counters are written with a single 64-bit access */
	writeq(val, ddrc_pmu->base + off);
}
108 
109 /*
110  * For DDRC PMU v1, event has been mapped to fixed-purpose counter by hardware,
111  * so there is no need to write event type, while it is programmable counter in
112  * PMU v2.
113  */
hisi_ddrc_pmu_write_evtype(struct hisi_pmu * ddrc_pmu,int idx,u32 type)114 static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
115 				       u32 type)
116 {
117 	u32 offset;
118 
119 	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
120 		offset = DDRC_V2_EVENT_TYPE + 4 * idx;
121 		writel(type, ddrc_pmu->base + offset);
122 	}
123 }
124 
hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu * ddrc_pmu)125 static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
126 {
127 	u32 val;
128 
129 	/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
130 	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
131 	val |= DDRC_V1_PERF_CTRL_EN;
132 	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
133 }
134 
hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu * ddrc_pmu)135 static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
136 {
137 	u32 val;
138 
139 	/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
140 	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
141 	val &= ~DDRC_V1_PERF_CTRL_EN;
142 	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
143 }
144 
hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)145 static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
146 					    struct hw_perf_event *hwc)
147 {
148 	u32 val;
149 
150 	/* Set counter index(event code) in DDRC_EVENT_CTRL register */
151 	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
152 	val |= (1 << GET_DDRC_EVENTID(hwc));
153 	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
154 }
155 
hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)156 static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
157 					     struct hw_perf_event *hwc)
158 {
159 	u32 val;
160 
161 	/* Clear counter index(event code) in DDRC_EVENT_CTRL register */
162 	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
163 	val &= ~(1 << GET_DDRC_EVENTID(hwc));
164 	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
165 }
166 
hisi_ddrc_pmu_v1_get_event_idx(struct perf_event * event)167 static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
168 {
169 	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
170 	unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
171 	struct hw_perf_event *hwc = &event->hw;
172 	/* For DDRC PMU, we use event code as counter index */
173 	int idx = GET_DDRC_EVENTID(hwc);
174 
175 	if (test_bit(idx, used_mask))
176 		return -EAGAIN;
177 
178 	set_bit(idx, used_mask);
179 
180 	return idx;
181 }
182 
static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
{
	/* v2 counters are programmable, so any free counter will do */
	return hisi_uncore_pmu_get_event_idx(event);
}
187 
hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu * ddrc_pmu)188 static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
189 {
190 	u32 val;
191 
192 	val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
193 	val |= DDRC_V2_PERF_CTRL_EN;
194 	writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
195 }
196 
hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu * ddrc_pmu)197 static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
198 {
199 	u32 val;
200 
201 	val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
202 	val &= ~DDRC_V2_PERF_CTRL_EN;
203 	writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
204 }
205 
hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)206 static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
207 					    struct hw_perf_event *hwc)
208 {
209 	u32 val;
210 
211 	val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
212 	val |= 1 << hwc->idx;
213 	writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
214 }
215 
hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)216 static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
217 					     struct hw_perf_event *hwc)
218 {
219 	u32 val;
220 
221 	val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
222 	val &= ~(1 << hwc->idx);
223 	writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
224 }
225 
hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)226 static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
227 						struct hw_perf_event *hwc)
228 {
229 	u32 val;
230 
231 	/* Write 0 to enable interrupt */
232 	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
233 	val &= ~(1 << hwc->idx);
234 	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
235 }
236 
hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)237 static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
238 						 struct hw_perf_event *hwc)
239 {
240 	u32 val;
241 
242 	/* Write 1 to mask interrupt */
243 	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
244 	val |= 1 << hwc->idx;
245 	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
246 }
247 
hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)248 static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
249 						struct hw_perf_event *hwc)
250 {
251 	u32 val;
252 
253 	val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
254 	val &= ~(1 << hwc->idx);
255 	writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
256 }
257 
hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu * ddrc_pmu,struct hw_perf_event * hwc)258 static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
259 						struct hw_perf_event *hwc)
260 {
261 	u32 val;
262 
263 	val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
264 	val |= 1 << hwc->idx;
265 	writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
266 }
267 
static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
{
	/* Bitmap of counters with a pending interrupt */
	return readl(ddrc_pmu->base + DDRC_INT_STATUS);
}
272 
static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
					      int idx)
{
	/* Write-1-to-clear the counter's pending interrupt */
	writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
}
278 
static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
{
	/* Bitmap of counters with a pending interrupt */
	return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
}
283 
static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
					      int idx)
{
	/* Write-1-to-clear the counter's pending interrupt */
	writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
}
289 
/* ACPI HIDs of the DDRC PMU devices handled by this driver */
static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
	{ "HISI0233", },
	{ "HISI0234", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
296 
hisi_ddrc_pmu_init_data(struct platform_device * pdev,struct hisi_pmu * ddrc_pmu)297 static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
298 				   struct hisi_pmu *ddrc_pmu)
299 {
300 	hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);
301 
302 	/*
303 	 * Use the SCCL_ID and DDRC channel ID to identify the
304 	 * DDRC PMU, while SCCL_ID is in MPIDR[aff2].
305 	 */
306 	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
307 				     &ddrc_pmu->topo.index_id)) {
308 		dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
309 		return -EINVAL;
310 	}
311 
312 	if (ddrc_pmu->topo.sccl_id < 0) {
313 		dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
314 		return -EINVAL;
315 	}
316 
317 	ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
318 	if (IS_ERR(ddrc_pmu->base)) {
319 		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
320 		return PTR_ERR(ddrc_pmu->base);
321 	}
322 
323 	ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
324 	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
325 		if (ddrc_pmu->topo.sub_id < 0) {
326 			dev_err(&pdev->dev, "Can not read sub-id!\n");
327 			return -EINVAL;
328 		}
329 	}
330 
331 	return 0;
332 }
333 
/* sysfs "format" description: v1 event code lives in config[4:0] */
static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v1_format_attr,
};

/* sysfs "format" description: v2 event code lives in config[7:0] */
static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v2_format_attr,
};
353 
/* Named events exposed via sysfs for PMU v1 (event code == counter) */
static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
	HISI_PMU_EVENT_ATTR(flux_wr,		0x00),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x01),
	HISI_PMU_EVENT_ATTR(flux_wcmd,		0x02),
	HISI_PMU_EVENT_ATTR(flux_rcmd,		0x03),
	HISI_PMU_EVENT_ATTR(pre_cmd,		0x04),
	HISI_PMU_EVENT_ATTR(act_cmd,		0x05),
	HISI_PMU_EVENT_ATTR(rnk_chg,		0x06),
	HISI_PMU_EVENT_ATTR(rw_chg,		0x07),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v1_events_attr,
};

/* Named events exposed via sysfs for PMU v2 */
static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(cycles,		0x00),
	HISI_PMU_EVENT_ATTR(flux_wr,		0x83),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x84),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v2_events_attr,
};
382 
/* Complete sysfs attribute group set for each PMU version */
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
	&hisi_ddrc_pmu_v1_format_group,
	&hisi_ddrc_pmu_v1_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL,
};

static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
	&hisi_ddrc_pmu_v2_format_group,
	&hisi_ddrc_pmu_v2_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};
398 
/* Uncore PMU callbacks for v1 hardware (fixed-purpose counters) */
static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
	.write_evtype           = hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_v1_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_v1_start_counters,
	.stop_counters		= hisi_ddrc_pmu_v1_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_v1_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_v1_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_v1_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_v1_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_v1_write_counter,
	.read_counter		= hisi_ddrc_pmu_v1_read_counter,
	.get_int_status		= hisi_ddrc_pmu_v1_get_int_status,
	.clear_int_status	= hisi_ddrc_pmu_v1_clear_int_status,
};

/* Uncore PMU callbacks for v2 hardware (programmable counters) */
static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
	.write_evtype           = hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_v2_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_v2_start_counters,
	.stop_counters		= hisi_ddrc_pmu_v2_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_v2_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_v2_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_v2_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_v2_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_v2_write_counter,
	.read_counter		= hisi_ddrc_pmu_v2_read_counter,
	.get_int_status		= hisi_ddrc_pmu_v2_get_int_status,
	.clear_int_status	= hisi_ddrc_pmu_v2_clear_int_status,
};
428 
hisi_ddrc_pmu_dev_probe(struct platform_device * pdev,struct hisi_pmu * ddrc_pmu)429 static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
430 				   struct hisi_pmu *ddrc_pmu)
431 {
432 	int ret;
433 
434 	ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
435 	if (ret)
436 		return ret;
437 
438 	ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
439 	if (ret)
440 		return ret;
441 
442 	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
443 		ddrc_pmu->counter_bits = 48;
444 		ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
445 		ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
446 		ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
447 	} else {
448 		ddrc_pmu->counter_bits = 32;
449 		ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
450 		ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
451 		ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
452 	}
453 
454 	ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
455 	ddrc_pmu->dev = &pdev->dev;
456 	ddrc_pmu->on_cpu = -1;
457 
458 	return 0;
459 }
460 
hisi_ddrc_pmu_probe(struct platform_device * pdev)461 static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
462 {
463 	struct hisi_pmu *ddrc_pmu;
464 	char *name;
465 	int ret;
466 
467 	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
468 	if (!ddrc_pmu)
469 		return -ENOMEM;
470 
471 	platform_set_drvdata(pdev, ddrc_pmu);
472 
473 	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
474 	if (ret)
475 		return ret;
476 
477 	if (ddrc_pmu->identifier >= HISI_PMU_V2)
478 		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
479 				      "hisi_sccl%d_ddrc%d_%d",
480 				      ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
481 				      ddrc_pmu->topo.sub_id);
482 	else
483 		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
484 				      "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
485 				      ddrc_pmu->topo.index_id);
486 
487 	if (!name)
488 		return -ENOMEM;
489 
490 	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
491 				       &ddrc_pmu->node);
492 	if (ret) {
493 		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
494 		return ret;
495 	}
496 
497 	hisi_pmu_init(ddrc_pmu, THIS_MODULE);
498 
499 	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
500 	if (ret) {
501 		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
502 		cpuhp_state_remove_instance_nocalls(
503 			CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
504 	}
505 
506 	return ret;
507 }
508 
static void hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);

	/* Unregister from perf first, then detach from CPU hotplug */
	perf_pmu_unregister(&ddrc_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
					    &ddrc_pmu->node);
}
517 
static struct platform_driver hisi_ddrc_pmu_driver = {
	.driver = {
		.name = "hisi_ddrc_pmu",
		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
		/* Disallow manual bind/unbind through sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_ddrc_pmu_probe,
	.remove = hisi_ddrc_pmu_remove,
};
527 
static int __init hisi_ddrc_pmu_module_init(void)
{
	int ret;

	/* The hotplug state must exist before any PMU instance attaches */
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

	return ret;
}
module_init(hisi_ddrc_pmu_module_init);
548 
static void __exit hisi_ddrc_pmu_module_exit(void)
{
	/* Tear down in reverse order of hisi_ddrc_pmu_module_init() */
	platform_driver_unregister(&hisi_ddrc_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);
555 
556 MODULE_IMPORT_NS("HISI_PMU");
557 MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
558 MODULE_LICENSE("GPL v2");
559 MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
560 MODULE_AUTHOR("Anurup M <[email protected]>");
561