Lines Matching +full:ats +full:- +full:supported in drivers/iommu/intel/perfmon.c (Intel VT-d IOMMU performance monitoring driver)
1 // SPDX-License-Identifier: GPL-2.0-only
13 PMU_FORMAT_ATTR(event, "config:0-27"); /* ES: Events Select */
14 PMU_FORMAT_ATTR(event_group, "config:28-31"); /* EGI: Event Group Index */
68 return (iommu_pmu->filter & _filter) ? attr->mode : 0; \
82 IOMMU_PMU_ATTR(filter_requester_id, "config1:16-31", IOMMU_PMU_FILTER_REQUESTER_ID);
83 IOMMU_PMU_ATTR(filter_domain, "config1:32-47", IOMMU_PMU_FILTER_DOMAIN);
84 IOMMU_PMU_ATTR(filter_pasid, "config2:0-21", IOMMU_PMU_FILTER_PASID);
85 IOMMU_PMU_ATTR(filter_ats, "config2:24-28", IOMMU_PMU_FILTER_ATS);
86 IOMMU_PMU_ATTR(filter_page_table, "config2:32-36", IOMMU_PMU_FILTER_PAGE_TABLE);
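These format and filter attributes define how userspace encodes an event into perf_event_attr: the event select goes in config bits 0-27, the event group index in bits 28-31, and the optional filter values in config1/config2 at the bit ranges listed above. The driver also checks a per-filter enable bit (the iommu_pmu_en_* test in the set_filter macro at line 101) before programming the hardware; those enable-bit format fields are defined on lines not shown in this listing. A minimal sketch of packing the value fields, with the helper and parameter names invented for the example:

    #include <stdint.h>
    #include <linux/perf_event.h>

    /*
     * Illustrative helper (not from the driver): pack an IOMMU PMU event and
     * its filter values into a perf_event_attr, following the bit ranges of
     * the format attributes above. The per-filter enable bits are omitted.
     */
    static void pack_iommu_event(struct perf_event_attr *attr,
                                 uint64_t event_select, uint64_t event_group,
                                 uint16_t requester_id, uint16_t domain,
                                 uint32_t pasid, uint8_t ats, uint8_t page_table)
    {
            attr->config  = (event_select & 0xfffffff)               /* config:0-27   */
                          | ((event_group & 0xf) << 28);             /* config:28-31  */
            attr->config1 = ((uint64_t)requester_id << 16)           /* config1:16-31 */
                          | ((uint64_t)domain << 32);                /* config1:32-47 */
            attr->config2 = (uint64_t)(pasid & 0x3fffff)             /* config2:0-21  */
                          | ((uint64_t)(ats & 0x1f) << 24)           /* config2:24-28 */
                          | ((uint64_t)(page_table & 0x1f) << 32);   /* config2:32-36 */
    }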
101 if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) { \
102 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
104 (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET, \
111 if (iommu_pmu->filter & _filter) { \
112 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
114 (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET, \
142 return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0; \
217 return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride; in iommu_event_base()
223 return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET; in iommu_config_base()
228 return container_of(event->pmu, struct iommu_pmu, pmu); in iommu_event_to_pmu()
233 u64 config = event->attr.config; in iommu_event_config()
243 return event->pmu == &iommu_pmu->pmu; in is_iommu_pmu_event()
249 u32 event_group = iommu_event_group(event->attr.config); in iommu_pmu_validate_event()
251 if (event_group >= iommu_pmu->num_eg) in iommu_pmu_validate_event()
252 return -EINVAL; in iommu_pmu_validate_event()
267 for_each_sibling_event(sibling, event->group_leader) { in iommu_pmu_validate_group()
269 sibling->state <= PERF_EVENT_STATE_OFF) in iommu_pmu_validate_group()
272 if (++nr > iommu_pmu->num_cntr) in iommu_pmu_validate_group()
273 return -EINVAL; in iommu_pmu_validate_group()
281 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_event_init()
283 if (event->attr.type != event->pmu->type) in iommu_pmu_event_init()
284 return -ENOENT; in iommu_pmu_event_init()
286 /* sampling not supported */ in iommu_pmu_event_init()
287 if (event->attr.sample_period) in iommu_pmu_event_init()
288 return -EINVAL; in iommu_pmu_event_init()
290 if (event->cpu < 0) in iommu_pmu_event_init()
291 return -EINVAL; in iommu_pmu_event_init()
294 return -EINVAL; in iommu_pmu_event_init()
296 hwc->config = iommu_event_config(event); in iommu_pmu_event_init()
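The init path above makes this a counting-only PMU: any event that asks for sampling (attr.sample_period) is rejected, and a negative attr.cpu is refused as well, since the PMU registers without a task context (task_ctx_nr = perf_invalid_context, line 537). Once the event group index has been range-checked against num_eg, the encoded event is cached in hwc->config for later programming.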
304 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_event_update()
306 int shift = 64 - iommu_pmu->cntr_width; in iommu_pmu_event_update()
309 prev_count = local64_read(&hwc->prev_count); in iommu_pmu_event_update()
310 new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx)); in iommu_pmu_event_update()
311 if (local64_xchg(&hwc->prev_count, new_count) != prev_count) in iommu_pmu_event_update()
318 delta = (new_count << shift) - (prev_count << shift); in iommu_pmu_event_update()
321 local64_add(delta, &event->count); in iommu_pmu_event_update()
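The counters are only cntr_width bits wide, so the update path shifts both the stale and the fresh raw value up by 64 - cntr_width before subtracting; a wrap of the narrow counter then becomes ordinary 64-bit modular arithmetic, and shifting the result back down yields the true delta. A standalone sketch of the same arithmetic, assuming a 48-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    /* Same delta trick as above, written as plain arithmetic. */
    static uint64_t narrow_delta(uint64_t prev, uint64_t now, unsigned int cntr_width)
    {
            unsigned int shift = 64 - cntr_width;
            uint64_t delta = (now << shift) - (prev << shift);

            return delta >> shift;
    }

    int main(void)
    {
            /* A 48-bit counter that wrapped: prev near its maximum, now small again. */
            printf("%llu\n", (unsigned long long)narrow_delta(0xfffffffffff0ULL, 0x10ULL, 48));
            /* Prints 32: 0x10 ticks up to the wrap plus 0x10 after it. */
            return 0;
    }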
327 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_start()
328 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_start()
331 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) in iommu_pmu_start()
334 if (WARN_ON_ONCE(hwc->idx < 0 || hwc->idx >= IOMMU_PMU_IDX_MAX)) in iommu_pmu_start()
338 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in iommu_pmu_start()
340 hwc->state = 0; in iommu_pmu_start()
343 count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx)); in iommu_pmu_start()
344 local64_set((&hwc->prev_count), count); in iommu_pmu_start()
348 * - The existing perf_event subsystem doesn't handle the error. in iommu_pmu_start()
351 * - It's a corner case caused by HW, which is very unlikely to in iommu_pmu_start()
353 * - The worst case is that the user will get <not count> with in iommu_pmu_start()
356 ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0); in iommu_pmu_start()
364 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_stop()
365 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_stop()
367 if (!(hwc->state & PERF_HES_STOPPED)) { in iommu_pmu_stop()
368 ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0); in iommu_pmu_stop()
372 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in iommu_pmu_stop()
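Starting and stopping a counter goes through the IOMMU enhanced command interface (ecmd_submit_sync() with DMA_ECMD_ENABLE / DMA_ECMD_DISABLE; the cap_ecmds() check at line 574 guards for it). The comment fragments at lines 348-353 record why a failed enable is deliberately ignored: the perf core offers no error path at this point, the failure is an unlikely hardware corner case, and the worst outcome is a "<not count>" result in the perf output, which itself gives the user a hint.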
380 u32 event_group = iommu_event_group(event->attr.config); in iommu_pmu_validate_per_cntr_event()
381 u32 select = iommu_event_select(event->attr.config); in iommu_pmu_validate_per_cntr_event()
383 if (!(iommu_pmu->cntr_evcap[idx][event_group] & select)) in iommu_pmu_validate_per_cntr_event()
384 return -EINVAL; in iommu_pmu_validate_per_cntr_event()
392 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_assign_event()
399 for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) { in iommu_pmu_assign_event()
400 if (test_and_set_bit(idx, iommu_pmu->used_mask)) in iommu_pmu_assign_event()
402 /* Check per-counter event capabilities */ in iommu_pmu_assign_event()
405 clear_bit(idx, iommu_pmu->used_mask); in iommu_pmu_assign_event()
408 return -EINVAL; in iommu_pmu_assign_event()
410 iommu_pmu->event_list[idx] = event; in iommu_pmu_assign_event()
411 hwc->idx = idx; in iommu_pmu_assign_event()
414 dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config); in iommu_pmu_assign_event()
416 iommu_pmu_set_filter(requester_id, event->attr.config1, in iommu_pmu_assign_event()
418 event->attr.config1); in iommu_pmu_assign_event()
419 iommu_pmu_set_filter(domain, event->attr.config1, in iommu_pmu_assign_event()
421 event->attr.config1); in iommu_pmu_assign_event()
422 iommu_pmu_set_filter(pasid, event->attr.config2, in iommu_pmu_assign_event()
424 event->attr.config1); in iommu_pmu_assign_event()
425 iommu_pmu_set_filter(ats, event->attr.config2, in iommu_pmu_assign_event()
427 event->attr.config1); in iommu_pmu_assign_event()
428 iommu_pmu_set_filter(page_table, event->attr.config2, in iommu_pmu_assign_event()
430 event->attr.config1); in iommu_pmu_assign_event()
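Counter assignment above scans the used_mask bitmap from the highest counter down, claims a slot with test_and_set_bit(), and gives it back if the per-counter event capabilities do not cover the requested event; only then are hwc->config and the filters written to that counter's configuration registers. A simplified, self-contained sketch of the claim-then-verify pattern (generic C, not the driver's types; the atomic bit operation is replaced by a plain mask):

    #include <stdint.h>

    #define NR_CNTR   8
    #define NR_GROUPS 4

    /*
     * Simplified model of the allocation loop in iommu_pmu_assign_event():
     * walk from the top counter down, claim a free slot, and release it again
     * if that counter's capabilities (cntr_evcap[idx][group]) do not include
     * the requested event select.
     */
    static int assign_counter(uint32_t *used_mask,
                              const uint32_t cntr_evcap[NR_CNTR][NR_GROUPS],
                              unsigned int group, uint32_t select)
    {
            for (int idx = NR_CNTR - 1; idx >= 0; idx--) {
                    if (*used_mask & (1u << idx))
                            continue;                       /* already taken */
                    *used_mask |= 1u << idx;                /* claim it */
                    if (cntr_evcap[idx][group] & select)
                            return idx;                     /* counter can count this event */
                    *used_mask &= ~(1u << idx);             /* give it back, try the next one */
            }
            return -1;                                      /* no suitable counter free */
    }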
438 struct hw_perf_event *hwc = &event->hw; in iommu_pmu_add()
445 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in iommu_pmu_add()
456 int idx = event->hw.idx; in iommu_pmu_del()
466 iommu_pmu->event_list[idx] = NULL; in iommu_pmu_del()
467 event->hw.idx = -1; in iommu_pmu_del()
468 clear_bit(idx, iommu_pmu->used_mask); in iommu_pmu_del()
476 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_enable()
484 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_disable()
499 while ((status = dmar_readq(iommu_pmu->overflow))) { in iommu_pmu_counter_overflow()
500 for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) { in iommu_pmu_counter_overflow()
503 * Accumulate the value into the event->count. in iommu_pmu_counter_overflow()
505 event = iommu_pmu->event_list[i]; in iommu_pmu_counter_overflow()
513 dmar_writeq(iommu_pmu->overflow, status); in iommu_pmu_counter_overflow()
521 if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG)) in iommu_pmu_irq_handler()
524 iommu_pmu_counter_overflow(iommu->pmu); in iommu_pmu_irq_handler()
527 dmar_writel(iommu->reg + DMAR_PERFINTRSTS_REG, DMA_PERFINTRSTS_PIS); in iommu_pmu_irq_handler()
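The interrupt path is two-staged: iommu_pmu_counter_overflow() keeps re-reading the overflow register until it reads zero, folds every overflowed counter into its event through the normal update path (the "Accumulate the value into the event->count" comment at line 503), and writes the status value back (line 513) before re-reading; the handler at lines 521-527 wraps that with a check of the perf interrupt status register and a final clear of DMA_PERFINTRSTS_PIS.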
534 struct iommu_pmu *iommu_pmu = iommu->pmu; in __iommu_pmu_register()
536 iommu_pmu->pmu.name = iommu->name; in __iommu_pmu_register()
537 iommu_pmu->pmu.task_ctx_nr = perf_invalid_context; in __iommu_pmu_register()
538 iommu_pmu->pmu.event_init = iommu_pmu_event_init; in __iommu_pmu_register()
539 iommu_pmu->pmu.pmu_enable = iommu_pmu_enable; in __iommu_pmu_register()
540 iommu_pmu->pmu.pmu_disable = iommu_pmu_disable; in __iommu_pmu_register()
541 iommu_pmu->pmu.add = iommu_pmu_add; in __iommu_pmu_register()
542 iommu_pmu->pmu.del = iommu_pmu_del; in __iommu_pmu_register()
543 iommu_pmu->pmu.start = iommu_pmu_start; in __iommu_pmu_register()
544 iommu_pmu->pmu.stop = iommu_pmu_stop; in __iommu_pmu_register()
545 iommu_pmu->pmu.read = iommu_pmu_event_update; in __iommu_pmu_register()
546 iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups; in __iommu_pmu_register()
547 iommu_pmu->pmu.attr_update = iommu_pmu_attr_update; in __iommu_pmu_register()
548 iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; in __iommu_pmu_register()
549 iommu_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE; in __iommu_pmu_register()
550 iommu_pmu->pmu.module = THIS_MODULE; in __iommu_pmu_register()
552 return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1); in __iommu_pmu_register()
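Because the PMU registers with perf_invalid_context and a system-wide scope, its events cannot be attached to a task; they are opened per IOMMU unit against a CPU, and the PMU appears under /sys/bus/event_source/devices/ under the IOMMU's name (the Intel units are typically named dmar0, dmar1, ...). A userspace counting sketch, with the PMU name and the config encoding left as placeholders:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /*
     * Sketch: open one counting event on an IOMMU PMU. "dmar0" and the
     * config value 0x1 are placeholders; real encodings come from the
     * events/ and format/ directories of the PMU in sysfs.
     */
    int main(void)
    {
            struct perf_event_attr attr;
            unsigned long long count;
            unsigned int type;
            FILE *f;
            int fd;

            /* The dynamic type id is assigned when perf_pmu_register() runs. */
            f = fopen("/sys/bus/event_source/devices/dmar0/type", "r");
            if (!f)
                    return 1;
            if (fscanf(f, "%u", &type) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;
            attr.config = 0x1;      /* placeholder event select */

            /* No task context: pid = -1, bind the event to a CPU instead. */
            fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                    return 1;

            sleep(1);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("count: %llu\n", count);
            close(fd);
            return 0;
    }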
558 u32 off = dmar_readl(iommu->reg + offset); in get_perf_reg_address()
560 return iommu->reg + off; in get_perf_reg_address()
570 if (!ecap_pms(iommu->ecap)) in alloc_iommu_pmu()
574 if (!cap_ecmds(iommu->cap)) in alloc_iommu_pmu()
575 return -ENODEV; in alloc_iommu_pmu()
577 perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG); in alloc_iommu_pmu()
578 /* The performance monitoring is not supported. */ in alloc_iommu_pmu()
580 return -ENODEV; in alloc_iommu_pmu()
584 return -ENODEV; in alloc_iommu_pmu()
588 return -ENODEV; in alloc_iommu_pmu()
592 return -ENODEV; in alloc_iommu_pmu()
596 return -ENOMEM; in alloc_iommu_pmu()
598 iommu_pmu->num_cntr = pcap_num_cntr(perfcap); in alloc_iommu_pmu()
599 if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) { in alloc_iommu_pmu()
601 iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX); in alloc_iommu_pmu()
602 iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX; in alloc_iommu_pmu()
605 iommu_pmu->cntr_width = pcap_cntr_width(perfcap); in alloc_iommu_pmu()
606 iommu_pmu->filter = pcap_filters_mask(perfcap); in alloc_iommu_pmu()
607 iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap); in alloc_iommu_pmu()
608 iommu_pmu->num_eg = pcap_num_event_group(perfcap); in alloc_iommu_pmu()
610 iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL); in alloc_iommu_pmu()
611 if (!iommu_pmu->evcap) { in alloc_iommu_pmu()
612 ret = -ENOMEM; in alloc_iommu_pmu()
617 for (i = 0; i < iommu_pmu->num_eg; i++) { in alloc_iommu_pmu()
620 pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG + in alloc_iommu_pmu()
622 iommu_pmu->evcap[i] = pecap_es(pcap); in alloc_iommu_pmu()
625 iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL); in alloc_iommu_pmu()
626 if (!iommu_pmu->cntr_evcap) { in alloc_iommu_pmu()
627 ret = -ENOMEM; in alloc_iommu_pmu()
630 for (i = 0; i < iommu_pmu->num_cntr; i++) { in alloc_iommu_pmu()
631 iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL); in alloc_iommu_pmu()
632 if (!iommu_pmu->cntr_evcap[i]) { in alloc_iommu_pmu()
633 ret = -ENOMEM; in alloc_iommu_pmu()
638 * to per-counter capabilities later. in alloc_iommu_pmu()
640 for (j = 0; j < iommu_pmu->num_eg; j++) in alloc_iommu_pmu()
641 iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j]; in alloc_iommu_pmu()
644 iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG); in alloc_iommu_pmu()
645 iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG); in alloc_iommu_pmu()
646 iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG); in alloc_iommu_pmu()
649 * Check per-counter capabilities. All counters should have the in alloc_iommu_pmu()
653 for (i = 0; i < iommu_pmu->num_cntr; i++) { in alloc_iommu_pmu()
654 cap = dmar_readl(iommu_pmu->cfg_reg + in alloc_iommu_pmu()
665 if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) || in alloc_iommu_pmu()
667 iommu_pmu->num_cntr = i; in alloc_iommu_pmu()
669 iommu_pmu->num_cntr); in alloc_iommu_pmu()
672 /* Clear the pre-defined events group */ in alloc_iommu_pmu()
673 for (j = 0; j < iommu_pmu->num_eg; j++) in alloc_iommu_pmu()
674 iommu_pmu->cntr_evcap[i][j] = 0; in alloc_iommu_pmu()
676 /* Override with per-counter event capabilities */ in alloc_iommu_pmu()
678 cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET + in alloc_iommu_pmu()
681 iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap); in alloc_iommu_pmu()
683 * Some events may only be supported by a specific counter. in alloc_iommu_pmu()
686 iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap); in alloc_iommu_pmu()
690 iommu_pmu->iommu = iommu; in alloc_iommu_pmu()
691 iommu->pmu = iommu_pmu; in alloc_iommu_pmu()
696 for (i = 0; i < iommu_pmu->num_cntr; i++) in alloc_iommu_pmu()
697 kfree(iommu_pmu->cntr_evcap[i]); in alloc_iommu_pmu()
698 kfree(iommu_pmu->cntr_evcap); in alloc_iommu_pmu()
700 kfree(iommu_pmu->evcap); in alloc_iommu_pmu()
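alloc_iommu_pmu() builds two capability tables from the hardware enumeration: evcap[group], a per-event-group mask of every event some counter can count (it decides which event attributes become visible, line 142), and cntr_evcap[counter][group], initialised from evcap and then cleared and refilled per counter when the counters advertise differing capabilities (lines 672-686). The validation paths consult them as sketched below (types and names simplified for the example):

    #include <stdint.h>
    #include <stdbool.h>

    /* Simplified shape of the capability tables built above. */
    struct pmu_caps {
            unsigned int num_eg;    /* number of event groups */
            unsigned int num_cntr;  /* number of counters     */
            uint64_t *evcap;        /* evcap[group]: union of events supported by any counter  */
            uint32_t **cntr_evcap;  /* cntr_evcap[cntr][group]: events this counter supports   */
    };

    /* cf. iommu_pmu_validate_event(), line 249: only the group index is range-checked. */
    static bool group_valid(const struct pmu_caps *c, unsigned int group)
    {
            return group < c->num_eg;
    }

    /* cf. the is_visible callback at line 142: evcap decides which events are advertised. */
    static bool event_advertised(const struct pmu_caps *c, unsigned int group, uint64_t select)
    {
            return group_valid(c, group) && (c->evcap[group] & select);
    }

    /* cf. iommu_pmu_validate_per_cntr_event(), line 383: the chosen counter must support it. */
    static bool counter_supports(const struct pmu_caps *c, unsigned int cntr,
                                 unsigned int group, uint32_t select)
    {
            return (c->cntr_evcap[cntr][group] & select) != 0;
    }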
709 struct iommu_pmu *iommu_pmu = iommu->pmu; in free_iommu_pmu()
714 if (iommu_pmu->evcap) { in free_iommu_pmu()
717 for (i = 0; i < iommu_pmu->num_cntr; i++) in free_iommu_pmu()
718 kfree(iommu_pmu->cntr_evcap[i]); in free_iommu_pmu()
719 kfree(iommu_pmu->cntr_evcap); in free_iommu_pmu()
721 kfree(iommu_pmu->evcap); in free_iommu_pmu()
723 iommu->pmu = NULL; in free_iommu_pmu()
728 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_set_interrupt()
731 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu); in iommu_pmu_set_interrupt()
733 return -EINVAL; in iommu_pmu_set_interrupt()
735 snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id); in iommu_pmu_set_interrupt()
737 iommu->perf_irq = irq; in iommu_pmu_set_interrupt()
739 IRQF_ONESHOT, iommu_pmu->irq_name, iommu); in iommu_pmu_set_interrupt()
742 iommu->perf_irq = 0; in iommu_pmu_set_interrupt()
750 if (!iommu->perf_irq) in iommu_pmu_unset_interrupt()
753 free_irq(iommu->perf_irq, iommu); in iommu_pmu_unset_interrupt()
754 dmar_free_hwirq(iommu->perf_irq); in iommu_pmu_unset_interrupt()
755 iommu->perf_irq = 0; in iommu_pmu_unset_interrupt()
760 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_register()
775 perf_pmu_unregister(&iommu_pmu->pmu); in iommu_pmu_register()
777 pr_err("Failed to register PMU for iommu (seq_id = %d)\n", iommu->seq_id); in iommu_pmu_register()
783 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_unregister()
789 perf_pmu_unregister(&iommu_pmu->pmu); in iommu_pmu_unregister()