Lines matching full:pmu — identifier search hits from the Marvell CN10K/Odyssey DDR PMU perf driver (likely drivers/perf/marvell_cn10k_ddr_pmu.c)
148 struct pmu pmu; member
161 void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
163 void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
165 void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
166 void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
167 void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
170 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
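
The matches at lines 148-170 outline the driver's core data model: a generic struct pmu embedded inside a driver-private cn10k_ddr_pmu, a per-chip ops table for the free-running counters, and a container_of() accessor to get from one to the other. A minimal sketch of that layout; field types and ordering are assumptions pieced together from the usages matched below, and DDRC_PERF_NUM_COUNTERS is an assumed size macro:

#include <linux/perf_event.h>
#include <linux/hrtimer.h>

struct cn10k_ddr_pmu;
struct ddr_pmu_platform_data;

/* Per-chip hooks (lines 161-167): CN10K and Odyssey differ in how the
 * dedicated free-running read/write counters are driven. */
struct ddr_pmu_ops {
    void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu, bool enable);
    void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu, bool enable);
    void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
    void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
    void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
};

struct cn10k_ddr_pmu {
    struct pmu pmu;                 /* line 148: embedded generic PMU */
    void __iomem *base;             /* MMIO base for readq/writeq_relaxed() */
    const struct ddr_pmu_platform_data *p_data;
    const struct ddr_pmu_ops *ops;
    unsigned int cpu;               /* CPU this uncore PMU is bound to */
    int active_events;
    struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
    struct hrtimer hrtimer;         /* overflow-polling timer */
    struct hlist_node node;         /* cpuhp state list entry */
    struct device *dev;
};

/* Line 170: the perf core passes a struct pmu *; container_of() walks
 * back to the enclosing driver object. */
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
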
365 struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev); in cn10k_ddr_perf_cpumask_show() local
367 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); in cn10k_ddr_perf_cpumask_show()
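
Lines 365-367 are two fragments of one sysfs "cpumask" show routine. Reconstructed below under the standard device-attribute signature (an assumption), it prints the single CPU that owns this uncore PMU:

static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
    struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);

    /* Uncore PMU: all events run on one CPU, so the mask is one bit. */
    return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
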
438 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_alloc_counter() argument
446 pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
452 pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
458 if (pmu->events[i] == NULL) { in cn10k_ddr_perf_alloc_counter()
459 pmu->events[i] = event; in cn10k_ddr_perf_alloc_counter()
467 static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_free_counter() argument
469 pmu->events[counter] = NULL; in cn10k_ddr_perf_free_counter()
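
Lines 438-469 sketch counter allocation and release: the two dedicated read/write events get fixed slots, everything else takes the first free generic slot, and freeing just clears the slot. A filled-in sketch; the EVENT_DDR_READS/EVENT_DDR_WRITES config encodings and DDRC_PERF_NUM_GEN_COUNTERS are assumptions:

static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
                                        struct perf_event *event)
{
    u64 config = event->attr.config;
    int i;

    /* The free-running read and write counters are dedicated slots. */
    if (config == EVENT_DDR_READS) {
        pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;   /* line 446 */
        return DDRC_PERF_READ_COUNTER_IDX;
    }
    if (config == EVENT_DDR_WRITES) {
        pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;  /* line 452 */
        return DDRC_PERF_WRITE_COUNTER_IDX;
    }

    /* First-fit scan over the programmable counters (lines 458-459). */
    for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
        if (pmu->events[i] == NULL) {
            pmu->events[i] = event;
            return i;
        }
    }

    return -ENOENT;
}

static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
    pmu->events[counter] = NULL;                           /* line 469 */
}
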
474 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_init() local
477 if (event->attr.type != event->pmu->type) in cn10k_ddr_perf_event_init()
481 dev_info(pmu->dev, "Sampling not supported!\n"); in cn10k_ddr_perf_event_init()
486 dev_warn(pmu->dev, "Can't provide per-task data!\n"); in cn10k_ddr_perf_event_init()
491 if (event->group_leader->pmu != event->pmu && in cn10k_ddr_perf_event_init()
498 event->cpu = pmu->cpu; in cn10k_ddr_perf_event_init()
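
Lines 474-498 are the usual uncore event_init() gatekeeping. Reconstructed below; the glue between the matched fragments follows the canonical pattern (the exact error codes are assumptions):

static int cn10k_ddr_perf_event_init(struct perf_event *event)
{
    struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);

    if (event->attr.type != event->pmu->type)
        return -ENOENT;

    if (is_sampling_event(event)) {
        dev_info(pmu->dev, "Sampling not supported!\n");
        return -EOPNOTSUPP;
    }

    if (event->cpu < 0) {
        dev_warn(pmu->dev, "Can't provide per-task data!\n");
        return -EOPNOTSUPP;
    }

    /* Grouped events must all target this PMU, except pure software
     * events (line 491). */
    if (event->group_leader->pmu != event->pmu &&
        !is_software_event(event->group_leader))
        return -EINVAL;

    /* Pin the event to the CPU that owns this uncore PMU (line 498). */
    event->cpu = pmu->cpu;

    return 0;
}
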
523 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_counter_enable() argument
526 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_counter_enable()
527 u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl; in cn10k_ddr_perf_counter_enable()
528 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_perf_counter_enable()
529 bool is_ody = pmu->p_data->is_ody; in cn10k_ddr_perf_counter_enable()
540 val = readq_relaxed(pmu->base + reg); in cn10k_ddr_perf_counter_enable()
547 writeq_relaxed(val, pmu->base + reg); in cn10k_ddr_perf_counter_enable()
552 * Setup the PMU counter to work in in cn10k_ddr_perf_counter_enable()
557 pmu->base + reg); in cn10k_ddr_perf_counter_enable()
559 cn10k_ddr_perf_counter_start(pmu, counter); in cn10k_ddr_perf_counter_enable()
561 cn10k_ddr_perf_counter_stop(pmu, counter); in cn10k_ddr_perf_counter_enable()
566 ops->enable_read_freerun_counter(pmu, enable); in cn10k_ddr_perf_counter_enable()
568 ops->enable_write_freerun_counter(pmu, enable); in cn10k_ddr_perf_counter_enable()
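
Lines 523-568 dispatch enable/disable by counter type: generic counters get a read-modify-write of a per-counter bit, while the dedicated read/write counters are routed through the per-chip free-run ops. A sketch; the register field and bit layout are assumptions, and the Odyssey manual-mode setup at lines 552-561 is elided:

static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
                                          int counter, bool enable)
{
    const struct ddr_pmu_ops *ops = pmu->ops;
    u64 reg, val;

    if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
        /* One enable bit per generic counter (assumed layout). */
        reg = pmu->p_data->cnt_en_op;                   /* assumed field */
        val = readq_relaxed(pmu->base + reg);           /* line 540 */
        if (enable)
            val |= BIT_ULL(counter);
        else
            val &= ~BIT_ULL(counter);
        writeq_relaxed(val, pmu->base + reg);           /* line 547 */
    } else if (counter == DDRC_PERF_READ_COUNTER_IDX) {
        ops->enable_read_freerun_counter(pmu, enable);  /* line 566 */
    } else {
        ops->enable_write_freerun_counter(pmu, enable); /* line 568 */
    }
}
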
572 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_read_counter() argument
574 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_read_counter()
578 return readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
582 return readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
585 val = readq_relaxed(pmu->base + in cn10k_ddr_perf_read_counter()
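
Lines 572-585 show three readq_relaxed() paths in the counter read: one each for the dedicated read and write counters, then an indexed read for the generic bank. A sketch; the offset field names and the per-counter stride are assumptions:

static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
    const struct ddr_pmu_platform_data *p_data = pmu->p_data;

    if (counter == DDRC_PERF_READ_COUNTER_IDX)
        return readq_relaxed(pmu->base + p_data->cnt_value_rd_op);

    if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
        return readq_relaxed(pmu->base + p_data->cnt_value_wr_op);

    /* Generic counters sit in a register file indexed by counter. */
    return readq_relaxed(pmu->base + p_data->cnt_base +
                         counter * DDRC_PERF_CNT_STRIDE);
}
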
592 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_update() local
593 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_event_update()
599 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_perf_event_update()
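
Lines 592-599 belong to the lock-free delta accumulation used by most perf drivers: snapshot the previous count, read the hardware, retry the cmpxchg until no update raced, then add the masked delta. A sketch; the counter-width mask field is an assumption:

static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
    struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    u64 prev_count, new_count;

    do {
        prev_count = local64_read(&hwc->prev_count);
        new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); /* line 599 */
    } while (local64_cmpxchg(&hwc->prev_count, prev_count,
                             new_count) != prev_count);

    /* Mask to the hardware counter width before accumulating. */
    local64_add((new_count - prev_count) & pmu->p_data->counter_max_val,
                &event->count);
}
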
609 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_start() local
615 cn10k_ddr_perf_counter_enable(pmu, counter, true); in cn10k_ddr_perf_event_start()
622 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_add() local
623 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_perf_event_add()
624 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_perf_event_add()
631 counter = cn10k_ddr_perf_alloc_counter(pmu, event); in cn10k_ddr_perf_event_add()
635 pmu->active_events++; in cn10k_ddr_perf_event_add()
638 if (pmu->active_events == 1) in cn10k_ddr_perf_event_add()
639 hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(), in cn10k_ddr_perf_event_add()
645 ret = ddr_perf_get_event_bitmap(config, &val, pmu); in cn10k_ddr_perf_event_add()
649 writeq_relaxed(val, pmu->base + reg_offset); in cn10k_ddr_perf_event_add()
653 ops->clear_read_freerun_counter(pmu); in cn10k_ddr_perf_event_add()
655 ops->clear_write_freerun_counter(pmu); in cn10k_ddr_perf_event_add()
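
Lines 622-655 finish event_add(): once a slot is allocated, the event's selection bitmap is resolved and written to that counter's config register, while the dedicated free-run counters are cleared before use. A sketch of the programming step; the register math is an assumption and ddr_perf_get_event_bitmap() (line 645) is taken as the driver's config-to-bitmap resolver:

static int example_program_counter(struct cn10k_ddr_pmu *pmu,
                                   struct perf_event *event, int counter)
{
    const struct ddr_pmu_platform_data *p_data = pmu->p_data;
    const struct ddr_pmu_ops *ops = pmu->ops;
    u64 reg_offset, val;
    int ret;

    if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
        /* Event-select register for this counter (assumed layout). */
        reg_offset = p_data->cnt_cfg_base + counter * DDRC_PERF_CFG_STRIDE;
        ret = ddr_perf_get_event_bitmap(event->attr.config, &val, pmu);
        if (ret)
            return ret;
        writeq_relaxed(val, pmu->base + reg_offset);    /* line 649 */
    } else if (counter == DDRC_PERF_READ_COUNTER_IDX) {
        ops->clear_read_freerun_counter(pmu);           /* line 653 */
    } else {
        ops->clear_write_freerun_counter(pmu);          /* line 655 */
    }

    return 0;
}
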
668 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_stop() local
672 cn10k_ddr_perf_counter_enable(pmu, counter, false); in cn10k_ddr_perf_event_stop()
682 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_del() local
688 cn10k_ddr_perf_free_counter(pmu, counter); in cn10k_ddr_perf_event_del()
689 pmu->active_events--; in cn10k_ddr_perf_event_del()
693 if (pmu->active_events == 0) in cn10k_ddr_perf_event_del()
694 hrtimer_cancel(&pmu->hrtimer); in cn10k_ddr_perf_event_del()
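
Lines 635-639 and 689-694 bracket the polling timer's lifetime: the first event to go active arms the hrtimer, the last one to leave cancels it. The pairing, condensed (the relative pinned timer mode is an assumption):

/* In event_add(): arm the overflow-polling timer with the first event. */
pmu->active_events++;
if (pmu->active_events == 1)
    hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
                  HRTIMER_MODE_REL_PINNED);

/* In event_del(): tear it down when the last event goes away. */
pmu->active_events--;
if (pmu->active_events == 0)
    hrtimer_cancel(&pmu->hrtimer);
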
697 static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu) in cn10k_ddr_perf_pmu_enable() argument
699 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_enable()
706 static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu) in cn10k_ddr_perf_pmu_disable() argument
708 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_disable()
715 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_perf_event_update_all() argument
721 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
724 cn10k_ddr_perf_event_update(pmu->events[i]); in cn10k_ddr_perf_event_update_all()
729 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
732 hwc = &pmu->events[i]->hw; in cn10k_ddr_perf_event_update_all()
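
Lines 715-732 run two passes over the live events: one to bank every counter's pending delta, and one that touches each event's hw state, presumably to re-baseline the prev snapshot before the caller resets the hardware. A sketch under that assumption:

static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
{
    struct hw_perf_event *hwc;
    int i;

    /* Pass 1: fold every live counter's current delta into perf. */
    for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
        if (pmu->events[i] == NULL)
            continue;
        cn10k_ddr_perf_event_update(pmu->events[i]);    /* line 724 */
    }

    /* Pass 2: the hardware is about to restart from zero, so zero
     * each prev snapshot to match (assumed intent of lines 729-732). */
    for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
        if (pmu->events[i] == NULL)
            continue;
        hwc = &pmu->events[i]->hw;                      /* line 732 */
        local64_set(&hwc->prev_count, 0);
    }
}
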
737 static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable) in ddr_pmu_enable_read_freerun() argument
739 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_enable_read_freerun()
742 val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_read_freerun()
748 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_read_freerun()
751 static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable) in ddr_pmu_enable_write_freerun() argument
753 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_enable_write_freerun()
756 val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_write_freerun()
762 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); in ddr_pmu_enable_write_freerun()
765 static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_read_clear_freerun() argument
767 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_read_clear_freerun()
771 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_read_clear_freerun()
774 static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_write_clear_freerun() argument
776 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_write_clear_freerun()
780 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_write_clear_freerun()
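
Lines 737-780 are the CN10K flavor of the free-run hooks: enable/disable is a read-modify-write of cnt_freerun_en, while clearing is a one-shot write to cnt_freerun_ctrl. One representative pair, sketched with assumed bit names:

static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
{
    const struct ddr_pmu_platform_data *p_data = pmu->p_data;
    u64 val;

    val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);    /* line 742 */
    if (enable)
        val |= DDRC_PERF_FREERUN_READ_EN;   /* assumed bit */
    else
        val &= ~DDRC_PERF_FREERUN_READ_EN;
    writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);    /* line 748 */
}

static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
{
    const struct ddr_pmu_platform_data *p_data = pmu->p_data;
    u64 val = DDRC_PERF_READ_FREERUN_CLR;   /* assumed one-shot clear bit */

    writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);  /* line 771 */
}
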
783 static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) in ddr_pmu_overflow_hander() argument
785 cn10k_ddr_perf_event_update_all(pmu); in ddr_pmu_overflow_hander()
786 cn10k_ddr_perf_pmu_disable(&pmu->pmu); in ddr_pmu_overflow_hander()
787 cn10k_ddr_perf_pmu_enable(&pmu->pmu); in ddr_pmu_overflow_hander()
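
Lines 785-787 are the complete CN10K overflow response: bank every live counter, then bounce the whole PMU so the hardware counters restart from zero. Reconstructed verbatim from the matches (the misspelled "hander" is the identifier as it appears in the source):

static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
    cn10k_ddr_perf_event_update_all(pmu);   /* bank all counts first */
    cn10k_ddr_perf_pmu_disable(&pmu->pmu);  /* stop ... */
    cn10k_ddr_perf_pmu_enable(&pmu->pmu);   /* ... and restart from zero */
}
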
790 static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu, in ddr_pmu_ody_enable_read_freerun() argument
793 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_enable_read_freerun()
796 val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_read_freerun()
802 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_read_freerun()
805 static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu, in ddr_pmu_ody_enable_write_freerun() argument
808 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_enable_write_freerun()
811 val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_write_freerun()
817 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); in ddr_pmu_ody_enable_write_freerun()
820 static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_ody_read_clear_freerun() argument
822 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_read_clear_freerun()
826 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr); in ddr_pmu_ody_read_clear_freerun()
829 static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu) in ddr_pmu_ody_write_clear_freerun() argument
831 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in ddr_pmu_ody_write_clear_freerun()
835 writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr); in ddr_pmu_ody_write_clear_freerun()
838 static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) in ddr_pmu_ody_overflow_hander() argument
845 cn10k_ddr_perf_event_update(pmu->events[evt_idx]); in ddr_pmu_ody_overflow_hander()
846 cn10k_ddr_perf_counter_stop(pmu, evt_idx); in ddr_pmu_ody_overflow_hander()
847 cn10k_ddr_perf_counter_start(pmu, evt_idx); in ddr_pmu_ody_overflow_hander()
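
Lines 790-847 are the Odyssey counterparts: enable bits live in cnt_freerun_ctrl, clears go to a separate cnt_freerun_clr register, and overflow is repaired per counter instead of bouncing the whole PMU. The per-counter overflow body, reconstructed from lines 845-847:

static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
    /* Bank the wrapped count, then restart just this one counter. */
    cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
    cn10k_ddr_perf_counter_stop(pmu, evt_idx);
    cn10k_ddr_perf_counter_start(pmu, evt_idx);
}
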
850 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_pmu_overflow_handler() argument
852 const struct ddr_pmu_platform_data *p_data = pmu->p_data; in cn10k_ddr_pmu_overflow_handler()
853 const struct ddr_pmu_ops *ops = pmu->ops; in cn10k_ddr_pmu_overflow_handler()
860 event = pmu->events[DDRC_PERF_READ_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
864 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
873 event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
877 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
887 if (pmu->events[i] == NULL) in cn10k_ddr_pmu_overflow_handler()
890 value = cn10k_ddr_perf_read_counter(pmu, i); in cn10k_ddr_pmu_overflow_handler()
893 ops->pmu_overflow_handler(pmu, i); in cn10k_ddr_pmu_overflow_handler()
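
Lines 850-893 poll the counters: the dedicated read/write counters are updated when they pass an overflow threshold (lines 860-877), and any generic counter that has saturated is handed to the per-chip hook. A sketch of the generic-counter leg; the saturation test against counter_max_val is an assumption:

static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
    const struct ddr_pmu_ops *ops = pmu->ops;
    u64 value;
    int i;

    /* ... dedicated read/write counters handled above (lines 860-877) ... */

    for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
        if (pmu->events[i] == NULL)
            continue;                       /* slot not in use (line 887) */

        value = cn10k_ddr_perf_read_counter(pmu, i);    /* line 890 */
        if (value == pmu->p_data->counter_max_val)
            ops->pmu_overflow_handler(pmu, i);          /* line 893 */
    }

    return IRQ_HANDLED;
}
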
902 struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_timer_handler() local
907 cn10k_ddr_pmu_overflow_handler(pmu); in cn10k_ddr_pmu_timer_handler()
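
Lines 902-907 are the hrtimer callback that drives the polling above. The usual shape forwards the timer by one period and asks for a restart; the irq-save bracket and the period reuse are assumptions:

static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
{
    struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
                                             hrtimer);
    unsigned long flags;

    local_irq_save(flags);
    cn10k_ddr_pmu_overflow_handler(pmu);    /* line 907 */
    local_irq_restore(flags);

    hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
    return HRTIMER_RESTART;
}
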
916 struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_offline_cpu() local
920 if (cpu != pmu->cpu) in cn10k_ddr_pmu_offline_cpu()
927 perf_pmu_migrate_context(&pmu->pmu, cpu, target); in cn10k_ddr_pmu_offline_cpu()
928 pmu->cpu = target; in cn10k_ddr_pmu_offline_cpu()
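
Lines 916-928 are the cpuhp offline callback for this uncore PMU: if the owning CPU goes down, the perf context migrates to any surviving CPU. Reconstructed; the target-selection details are assumptions matching the common hotplug pattern:

static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
    struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
                                                 node);
    unsigned int target;

    if (cpu != pmu->cpu)
        return 0;               /* not our CPU; nothing to do (line 923) */

    /* Pick any other online CPU to inherit the events. */
    target = cpumask_any_but(cpu_online_mask, cpu);
    if (target >= nr_cpu_ids)
        return 0;               /* last CPU standing */

    perf_pmu_migrate_context(&pmu->pmu, cpu, target);   /* line 927 */
    pmu->cpu = target;                                  /* line 928 */

    return 0;
}
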
1022 /* Setup the PMU counter to work in manual mode */ in cn10k_ddr_perf_probe()
1026 ddr_pmu->pmu = (struct pmu) { in cn10k_ddr_perf_probe()
1045 ddr_pmu->pmu = (struct pmu) { in cn10k_ddr_perf_probe()
1074 ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); in cn10k_ddr_perf_probe()
1078 pr_info("DDR PMU Driver for ddrc@%llx\n", res->start); in cn10k_ddr_perf_probe()
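
Lines 1022-1078 cover probe. Two struct pmu initializers appear (lines 1026 and 1045), presumably the CN10K and Odyssey variants; both end in perf_pmu_register(). A condensed single sketch; the attr_groups pointer and error path are assumptions, while every callback name appears in the matches above:

ddr_pmu->pmu = (struct pmu) {
    .module       = THIS_MODULE,
    .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
    .task_ctx_nr  = perf_invalid_context,   /* uncore: no per-task context */
    .attr_groups  = cn10k_attr_groups,      /* assumed name */
    .event_init   = cn10k_ddr_perf_event_init,
    .add          = cn10k_ddr_perf_event_add,
    .del          = cn10k_ddr_perf_event_del,
    .start        = cn10k_ddr_perf_event_start,
    .stop         = cn10k_ddr_perf_event_stop,
    .read         = cn10k_ddr_perf_event_update,
    .pmu_enable   = cn10k_ddr_perf_pmu_enable,
    .pmu_disable  = cn10k_ddr_perf_pmu_disable,
};

ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);       /* line 1074 */
if (ret)
    return ret;

pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);  /* line 1078 */
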
1095 perf_pmu_unregister(&ddr_pmu->pmu); in cn10k_ddr_perf_remove()
1100 { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
1117 .name = "cn10k-ddr-pmu",
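
The final matches are driver boilerplate: the OF match table entry at line 1100 and the platform driver name at line 1117. Their standard surrounding structure, sketched with assumed variable names:

static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
    { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
    { },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);

static struct platform_driver cn10k_ddr_pmu_driver = {
    .driver = {
        .name                = "cn10k-ddr-pmu",
        .of_match_table      = of_match_ptr(cn10k_ddr_pmu_of_match),
        .suppress_bind_attrs = true,
    },
    .probe  = cn10k_ddr_perf_probe,
    .remove = cn10k_ddr_perf_remove,
};
module_platform_driver(cn10k_ddr_pmu_driver);
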