
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe PMU driver
 *
 * Copyright (C) 2021-2023 Alibaba Inc.
 */
/*
 * Event Counter Data Select includes two parts:
 * - 27-24: Group number (4-bit: 0..0x7)
 * - 23-16: Event number (8-bit: 0..0x13) within the Group
 */
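A minimal sketch of packing the two parts described above into one select value with the kernel's bitfield helpers. The mask names here are illustrative stand-ins, not necessarily the driver's own:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CNT_GROUP_SEL	GENMASK(27, 24)	/* Group number: 0..0x7 */
#define EX_CNT_EVENT_SEL	GENMASK(23, 16)	/* Event number: 0..0x13 */

static u32 ex_pack_event_select(u8 group, u8 event_nr)
{
	/* FIELD_PREP() shifts each value into its mask's bit position */
	return FIELD_PREP(EX_CNT_GROUP_SEL, group) |
	       FIELD_PREP(EX_CNT_EVENT_SEL, event_nr);
}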
#define DWC_PCIE_EVENT_ID(event)	FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
#define DWC_PCIE_EVENT_TYPE(event)	FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
#define DWC_PCIE_EVENT_LANE(event)	FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
/* in cpumask_show() */
	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
PMU_FORMAT_ATTR(eventid, "config:0-15");
PMU_FORMAT_ATTR(type, "config:16-19");
PMU_FORMAT_ATTR(lane, "config:20-27");
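These format strings tell the perf core how the user-visible eventid, type and lane fields pack into perf_event_attr::config; the DWC_PCIE_EVENT_*() getters above decode the same bits. A sketch of the encoding side, with illustrative mask names mirroring the advertised ranges:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CONFIG_EVENTID	GENMASK_ULL(15, 0)	/* config:0-15 */
#define EX_CONFIG_TYPE		GENMASK_ULL(19, 16)	/* config:16-19 */
#define EX_CONFIG_LANE		GENMASK_ULL(27, 20)	/* config:20-27 */

static u64 ex_pack_config(u16 eventid, u8 type, u8 lane)
{
	return FIELD_PREP(EX_CONFIG_EVENTID, eventid) |
	       FIELD_PREP(EX_CONFIG_TYPE, type) |
	       FIELD_PREP(EX_CONFIG_LANE, lane);
}

This is the value that an invocation such as "perf stat -e dwc_rootport_<id>/eventid=0x...,type=0x...,lane=N/" ends up encoding into the event's config word.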
/* member of struct dwc_pcie_event_attr */
	u8 lane;

/* in dwc_pcie_event_show() */
	if (eattr->type == DWC_PCIE_LANE_EVENT)
		return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
				  eattr->eventid, eattr->type);
	else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
		return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
				  eattr->eventid, eattr->type);
/* in the DWC_PCIE_EVENT_ATTR() initializer */
	.lane = _lane,						\

/*
 * Leave it to the user to specify the lane ID to avoid generating
 * a list of hundreds of events.
 */
/* in dwc_pcie_pmu_lane_event_enable() */
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

/* in dwc_pcie_pmu_time_based_event_enable() */
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;
/* in dwc_pcie_pmu_read_lane_event_counter() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

/* in dwc_pcie_pmu_read_time_based_counter() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

	/*
	 * The 64-bit value of the data counter is spread across two
	 * registers that are not synchronized.
	 */

	/*
	 * The Group#1 event measures the amount of data processed in 16-byte
	 * units. Simplify the end-user interface by multiplying the counter
	 * into byte units.
	 */
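A sketch of the consistent-read pattern the first comment implies: re-read the high half until it is stable around the low-half read, so the combined 64-bit value is never torn. The register offsets here are hypothetical placeholders rather than the driver's macros:

#include <linux/pci.h>
#include <linux/types.h>

static u64 ex_read_split_counter(struct pci_dev *pdev, int lo_off, int hi_off)
{
	u32 lo, hi, ss;

	pci_read_config_dword(pdev, hi_off, &hi);
	do {
		ss = hi;
		pci_read_config_dword(pdev, lo_off, &lo);
		pci_read_config_dword(pdev, hi_off, &hi);
	} while (hi != ss);	/* high half moved: retry */

	return ((u64)hi << 32) | lo;
}

Per the second comment, a caller would then multiply Group#1 data counts by 16 to report plain bytes.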
/* in dwc_pcie_pmu_event_update() */
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now = 0;

	do {
		prev = local64_read(&hwc->prev_count);
		/* ... re-read the hardware counter into "now" ... */
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
	/* 32-bit counter for Lane Event Counting */

	local64_add(delta, &event->count);
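The subtract-and-mask idiom above yields a correct delta across counter wraparound, assuming at most one wrap between reads; lane counters are only 32 bits wide, which is why the comment flags a narrower mask for them. A self-contained illustration with an assumed width parameter:

#include <linux/bits.h>
#include <linux/types.h>

static u64 ex_counter_delta(u64 prev, u64 now, unsigned int width)
{
	/*
	 * Unsigned modular arithmetic: even if "now" wrapped past
	 * "prev", masking to the counter width recovers the number
	 * of increments that actually occurred.
	 */
	return (now - prev) & GENMASK_ULL(width - 1, 0);
}

/* e.g. ex_counter_delta(0xfffffff0, 0x10, 32) == 0x20 */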
/* in dwc_pcie_pmu_event_init() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
	struct perf_event *sibling;
	u32 lane;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* We don't support sampling */
	if (is_sampling_event(event))
		return -EINVAL;

	/* We cannot support task-bound events */
	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->group_leader != event &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu && !is_software_event(sibling))
			return -EINVAL;
	}

	if (type >= DWC_PCIE_EVENT_TYPE_MAX)
		return -EINVAL;

	if (type == DWC_PCIE_LANE_EVENT) {
		lane = DWC_PCIE_EVENT_LANE(event);
		if (lane >= pcie_pmu->nr_lanes)
			return -EINVAL;
	}

	event->cpu = pcie_pmu->on_cpu;
/* in dwc_pcie_pmu_event_start() */
	struct hw_perf_event *hwc = &event->hw;
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);

	hwc->state = 0;
	local64_set(&hwc->prev_count, 0);
/* in dwc_pcie_pmu_event_stop() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
/* in dwc_pcie_pmu_event_add() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	struct hw_perf_event *hwc = &event->hw;
	int lane = DWC_PCIE_EVENT_LANE(event);
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

	/* Only one counter is available per event type */
	if (pcie_pmu->event[type])
		return -ENOSPC;

	pcie_pmu->event[type] = event;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	/* lane select is one field of the event-counter control word: */
		FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
/* in dwc_pcie_pmu_event_del() */
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);

	pcie_pmu->event[type] = NULL;
/* in dwc_pcie_find_dev_info() */
		if (dev_info->pdev == pdev)
			return dev_info;

/* in dwc_pcie_unregister_pmu() */
	perf_pmu_unregister(&pcie_pmu->pmu);
/* in dwc_pcie_des_cap() */
	for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
		vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
						vid->vsec_id);
		if (vsec) {
			pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
					      &val);
			if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
				pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
/* in dwc_pcie_unregister_dev() */
	platform_device_unregister(dev_info->plat_dev);
	list_del(&dev_info->dev_node);

/* in dwc_pcie_register_dev() */
	sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);

	if (!dev_info)
		return -ENOMEM;

	dev_info->plat_dev = plat_dev;
	dev_info->pdev = pdev;
	list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);
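The sbdf value above packs the device's PCI location into a single integer: the domain (segment) number in bits 31-16 and PCI_DEVID()'s bus/devfn encoding in bits 15-8 and 7-0. It becomes the platform device id and, below, the "dwc_rootport_%x" name suffix. As a hypothetical worked example: domain 0, bus 0x30, devfn 0x18 gives sbdf 0x3018, so the PMU would register as "dwc_rootport_3018".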
/* in dwc_pcie_pmu_probe() */
	sbdf = plat_dev->id;

	if (!pdev)
		return -ENODEV;

	if (!vsec)
		return -ENODEV;

	name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
	if (!name)
		return -ENOMEM;

	pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
	if (!pcie_pmu)
		return -ENOMEM;

	pcie_pmu->pdev = pdev;
	pcie_pmu->ras_des_offset = vsec;
	pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
	pcie_pmu->on_cpu = -1;
	pcie_pmu->pmu = (struct pmu){
		.parent = &plat_dev->dev,
		/* ... remaining ops and attribute groups ... */
	};

	ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
				       &pcie_pmu->cpuhp_node);

	ret = devm_add_action_or_reset(&plat_dev->dev,
				       dwc_pcie_pmu_remove_cpuhp_instance,
				       &pcie_pmu->cpuhp_node);

	ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);

	ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
				       pcie_pmu);
/* in dwc_pcie_pmu_online_cpu() */
	if (pcie_pmu->on_cpu == -1)
		pcie_pmu->on_cpu = cpumask_local_spread(
			0, dev_to_node(&pcie_pmu->pdev->dev));
/* in dwc_pcie_pmu_offline_cpu() */
	if (cpu != pcie_pmu->on_cpu)
		return 0;

	pcie_pmu->on_cpu = -1;
	pdev = pcie_pmu->pdev;
	node = dev_to_node(&pdev->dev);

	perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
	pcie_pmu->on_cpu = target;