Lines Matching +full:msi +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0
15 #include <linux/msi.h>
17 #include <linux/pci-ecam.h>
20 #include "pcie-plda.h"
25 struct plda_pcie_rp *pcie = bus->sysdata; in plda_pcie_map_bus()
27 return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); in plda_pcie_map_bus()
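/*
 * plda_pcie_map_bus(): ECAM-style config accessor — the root port keeps the
 * mapped config window in config_base and indexes it with PCIE_ECAM_OFFSET().
 */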
35 struct device *dev = port->dev; in plda_handle_msi()
36 struct plda_msi *msi = &port->msi; in plda_handle_msi() local
37 void __iomem *bridge_base_addr = port->bridge_addr; in plda_handle_msi()
49 for_each_set_bit(bit, &status, msi->num_vectors) { in plda_handle_msi()
50 ret = generic_handle_domain_irq(msi->dev_domain, bit); in plda_handle_msi()
52 dev_err_ratelimited(dev, "bad MSI IRQ %d\n", in plda_handle_msi()
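/*
 * plda_handle_msi(): chained handler for the MSI event; each bit set in the
 * status word (up to msi->num_vectors) is dispatched into msi->dev_domain,
 * with a rate-limited error for vectors that have no mapping.
 */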
63 void __iomem *bridge_base_addr = port->bridge_addr; in plda_msi_bottom_irq_ack()
64 u32 bitpos = data->hwirq; in plda_msi_bottom_irq_ack()
72 phys_addr_t addr = port->msi.vector_phy; in plda_compose_msi_msg()
74 msg->address_lo = lower_32_bits(addr); in plda_compose_msi_msg()
75 msg->address_hi = upper_32_bits(addr); in plda_compose_msi_msg()
76 msg->data = data->hwirq; in plda_compose_msi_msg()
78 dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n", in plda_compose_msi_msg()
79 (int)data->hwirq, msg->address_hi, msg->address_lo); in plda_compose_msi_msg()
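/*
 * plda_compose_msi_msg(): the doorbell address programmed into endpoints is
 * the controller's msi.vector_phy; the MSI data is simply the allocated hwirq.
 */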
83 .name = "PLDA MSI",
93 struct plda_pcie_rp *port = domain->host_data; in plda_irq_msi_domain_alloc()
94 struct plda_msi *msi = &port->msi; in plda_irq_msi_domain_alloc() local
97 mutex_lock(&msi->lock); in plda_irq_msi_domain_alloc()
98 bit = find_first_zero_bit(msi->used, msi->num_vectors); in plda_irq_msi_domain_alloc()
99 if (bit >= msi->num_vectors) { in plda_irq_msi_domain_alloc()
100 mutex_unlock(&msi->lock); in plda_irq_msi_domain_alloc()
101 return -ENOSPC; in plda_irq_msi_domain_alloc()
104 set_bit(bit, msi->used); in plda_irq_msi_domain_alloc()
107 domain->host_data, handle_edge_irq, NULL, NULL); in plda_irq_msi_domain_alloc()
109 mutex_unlock(&msi->lock); in plda_irq_msi_domain_alloc()
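/*
 * plda_irq_msi_domain_alloc(): vectors are tracked in the msi->used bitmap
 * under msi->lock; a full bitmap returns -ENOSPC, otherwise the first free
 * bit becomes the hwirq and is registered with handle_edge_irq.
 */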
120 struct plda_msi *msi = &port->msi; in plda_irq_msi_domain_free() local
122 mutex_lock(&msi->lock); in plda_irq_msi_domain_free()
124 if (test_bit(d->hwirq, msi->used)) in plda_irq_msi_domain_free()
125 __clear_bit(d->hwirq, msi->used); in plda_irq_msi_domain_free()
127 dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq); in plda_irq_msi_domain_free()
129 mutex_unlock(&msi->lock); in plda_irq_msi_domain_free()
138 .name = "PLDA PCIe MSI",
152 struct device *dev = port->dev; in plda_allocate_msi_domains()
153 struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); in plda_allocate_msi_domains()
154 struct plda_msi *msi = &port->msi; in plda_allocate_msi_domains() local
156 mutex_init(&port->msi.lock); in plda_allocate_msi_domains()
158 msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors, in plda_allocate_msi_domains()
160 if (!msi->dev_domain) { in plda_allocate_msi_domains()
162 return -ENOMEM; in plda_allocate_msi_domains()
165 msi->msi_domain = pci_msi_create_irq_domain(fwnode, in plda_allocate_msi_domains()
167 msi->dev_domain); in plda_allocate_msi_domains()
168 if (!msi->msi_domain) { in plda_allocate_msi_domains()
169 dev_err(dev, "failed to create MSI domain\n"); in plda_allocate_msi_domains()
170 irq_domain_remove(msi->dev_domain); in plda_allocate_msi_domains()
171 return -ENOMEM; in plda_allocate_msi_domains()
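/*
 * plda_allocate_msi_domains(): two-level scheme — an inner linear domain
 * (dev_domain) for vector bookkeeping, with a PCI MSI domain layered on top
 * via pci_msi_create_irq_domain(); on failure the inner domain is removed.
 */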
181 struct device *dev = port->dev; in plda_handle_intx()
182 void __iomem *bridge_base_addr = port->bridge_addr; in plda_handle_intx()
194 ret = generic_handle_domain_irq(port->intx_domain, bit); in plda_handle_intx()
207 void __iomem *bridge_base_addr = port->bridge_addr; in plda_ack_intx_irq()
208 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_ack_intx_irq()
216 void __iomem *bridge_base_addr = port->bridge_addr; in plda_mask_intx_irq()
218 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_mask_intx_irq()
221 raw_spin_lock_irqsave(&port->lock, flags); in plda_mask_intx_irq()
225 raw_spin_unlock_irqrestore(&port->lock, flags); in plda_mask_intx_irq()
231 void __iomem *bridge_base_addr = port->bridge_addr; in plda_unmask_intx_irq()
233 u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT); in plda_unmask_intx_irq()
236 raw_spin_lock_irqsave(&port->lock, flags); in plda_unmask_intx_irq()
240 raw_spin_unlock_irqrestore(&port->lock, flags); in plda_unmask_intx_irq()
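/*
 * INTx ack/mask/unmask: the INTx lines share an interrupt register with other
 * events at bit position hwirq + PM_MSI_INT_INTX_SHIFT, so mask/unmask are
 * read-modify-write sequences guarded by port->lock.
 */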
254 irq_set_chip_data(irq, domain->host_data); in plda_pcie_intx_map()
260 .map = plda_pcie_intx_map,
267 origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL); in plda_get_events()
269 /* MSI event and sys events */ in plda_get_events()
271 events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1); in plda_get_events()
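/*
 * plda_get_events(): reads ISTATUS_LOCAL and remaps the raw bits into the
 * driver's contiguous event numbering; the PCI_NUM_INTX adjustment in the
 * shift suggests the four INTx bits collapse into a single event.
 */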
297 events = port->event_ops->get_events(port); in plda_handle_event()
299 events &= port->events_bitmap; in plda_handle_event()
300 for_each_set_bit(bit, &events, port->num_events) in plda_handle_event()
301 generic_handle_domain_irq(port->event_domain, bit); in plda_handle_event()
310 /* hwirq 23 - 0 map directly to the register bit positions */ in plda_hwirq_to_mask()
316 mask = BIT(hwirq + PCI_NUM_INTX - 1); in plda_hwirq_to_mask()
325 writel_relaxed(plda_hwirq_to_mask(data->hwirq), in plda_ack_event_irq()
326 port->bridge_addr + ISTATUS_LOCAL); in plda_ack_event_irq()
334 mask = plda_hwirq_to_mask(data->hwirq); in plda_mask_event_irq()
336 raw_spin_lock(&port->lock); in plda_mask_event_irq()
337 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); in plda_mask_event_irq()
339 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); in plda_mask_event_irq()
340 raw_spin_unlock(&port->lock); in plda_mask_event_irq()
348 mask = plda_hwirq_to_mask(data->hwirq); in plda_unmask_event_irq()
350 raw_spin_lock(&port->lock); in plda_unmask_event_irq()
351 val = readl_relaxed(port->bridge_addr + IMASK_LOCAL); in plda_unmask_event_irq()
353 writel_relaxed(val, port->bridge_addr + IMASK_LOCAL); in plda_unmask_event_irq()
354 raw_spin_unlock(&port->lock); in plda_unmask_event_irq()
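/*
 * Event irqchip ops: ack writes the per-event bit back to ISTATUS_LOCAL,
 * while mask/unmask read-modify-write IMASK_LOCAL under port->lock.
 */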
371 struct plda_pcie_rp *port = (void *)domain->host_data; in plda_pcie_event_map()
373 irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq); in plda_pcie_event_map()
374 irq_set_chip_data(irq, domain->host_data); in plda_pcie_event_map()
380 .map = plda_pcie_event_map,
385 struct device *dev = port->dev; in plda_pcie_init_irq_domains()
386 struct device_node *node = dev->of_node; in plda_pcie_init_irq_domains()
393 return -EINVAL; in plda_pcie_init_irq_domains()
396 port->event_domain = irq_domain_add_linear(pcie_intc_node, in plda_pcie_init_irq_domains()
397 port->num_events, in plda_pcie_init_irq_domains()
400 if (!port->event_domain) { in plda_pcie_init_irq_domains()
403 return -ENOMEM; in plda_pcie_init_irq_domains()
406 irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); in plda_pcie_init_irq_domains()
408 port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, in plda_pcie_init_irq_domains()
410 if (!port->intx_domain) { in plda_pcie_init_irq_domains()
413 return -ENOMEM; in plda_pcie_init_irq_domains()
416 irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); in plda_pcie_init_irq_domains()
419 raw_spin_lock_init(&port->lock); in plda_pcie_init_irq_domains()
428 struct device *dev = &pdev->dev; in plda_init_interrupts()
432 if (!port->event_ops) in plda_init_interrupts()
433 port->event_ops = &plda_event_ops; in plda_init_interrupts()
435 if (!port->event_irq_chip) in plda_init_interrupts()
436 port->event_irq_chip = &plda_event_irq_chip; in plda_init_interrupts()
444 port->irq = platform_get_irq(pdev, 0); in plda_init_interrupts()
445 if (port->irq < 0) in plda_init_interrupts()
446 return -ENODEV; in plda_init_interrupts()
448 for_each_set_bit(i, &port->events_bitmap, port->num_events) { in plda_init_interrupts()
449 event_irq = irq_create_mapping(port->event_domain, i); in plda_init_interrupts()
451 dev_err(dev, "failed to map hwirq %d\n", i); in plda_init_interrupts()
452 return -ENXIO; in plda_init_interrupts()
455 if (event->request_event_irq) in plda_init_interrupts()
456 ret = event->request_event_irq(port, event_irq, i); in plda_init_interrupts()
468 port->intx_irq = irq_create_mapping(port->event_domain, in plda_init_interrupts()
469 event->intx_event); in plda_init_interrupts()
470 if (!port->intx_irq) { in plda_init_interrupts()
471 dev_err(dev, "failed to map INTx interrupt\n"); in plda_init_interrupts()
472 return -ENXIO; in plda_init_interrupts()
476 irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port); in plda_init_interrupts()
478 port->msi_irq = irq_create_mapping(port->event_domain, in plda_init_interrupts()
479 event->msi_event); in plda_init_interrupts()
480 if (!port->msi_irq) in plda_init_interrupts()
481 return -ENXIO; in plda_init_interrupts()
483 /* Plug the MSI chained handler */ in plda_init_interrupts()
484 irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port); in plda_init_interrupts()
487 irq_set_chained_handler_and_data(port->irq, plda_handle_event, port); in plda_init_interrupts()
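/*
 * plda_init_interrupts(): fills in default event ops/irqchip, maps every
 * event in events_bitmap (optionally via event->request_event_irq), then
 * chains plda_handle_intx and plda_handle_msi onto their event IRQs and
 * plda_handle_event onto the platform IRQ.
 */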
497 u32 atr_sz = ilog2(size) - 1; in plda_pcie_setup_window()
530 void __iomem *bridge_base_addr = port->bridge_addr; in plda_pcie_setup_inbound_address_translation()
543 void __iomem *bridge_base_addr = port->bridge_addr; in plda_pcie_setup_iomems()
548 resource_list_for_each_entry(entry, &bridge->windows) { in plda_pcie_setup_iomems()
549 if (resource_type(entry->res) == IORESOURCE_MEM) { in plda_pcie_setup_iomems()
550 pci_addr = entry->res->start - entry->offset; in plda_pcie_setup_iomems()
552 entry->res->start, pci_addr, in plda_pcie_setup_iomems()
553 resource_size(entry->res)); in plda_pcie_setup_iomems()
564 irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
565 irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
566 irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL); in plda_pcie_irq_domain_deinit()
568 irq_domain_remove(pcie->msi.msi_domain); in plda_pcie_irq_domain_deinit()
569 irq_domain_remove(pcie->msi.dev_domain); in plda_pcie_irq_domain_deinit()
571 irq_domain_remove(pcie->intx_domain); in plda_pcie_irq_domain_deinit()
572 irq_domain_remove(pcie->event_domain); in plda_pcie_irq_domain_deinit()
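/*
 * plda_pcie_irq_domain_deinit(): teardown mirrors setup — the chained
 * handlers are detached first, then the MSI, INTx and event domains removed.
 */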
578 struct device *dev = port->dev; in plda_pcie_host_init()
586 port->bridge_addr = in plda_pcie_host_init()
589 if (IS_ERR(port->bridge_addr)) in plda_pcie_host_init()
590 return dev_err_probe(dev, PTR_ERR(port->bridge_addr), in plda_pcie_host_init()
591 "failed to map reg memory\n"); in plda_pcie_host_init()
595 return dev_err_probe(dev, -ENODEV, in plda_pcie_host_init()
598 port->config_base = devm_ioremap_resource(dev, cfg_res); in plda_pcie_host_init()
599 if (IS_ERR(port->config_base)) in plda_pcie_host_init()
600 return dev_err_probe(dev, PTR_ERR(port->config_base), in plda_pcie_host_init()
601 "failed to map config memory\n"); in plda_pcie_host_init()
605 return dev_err_probe(dev, -ENOMEM, in plda_pcie_host_init()
608 if (port->host_ops && port->host_ops->host_init) { in plda_pcie_host_init()
609 ret = port->host_ops->host_init(port); in plda_pcie_host_init()
614 port->bridge = bridge; in plda_pcie_host_init()
615 plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0, in plda_pcie_host_init()
618 plda_set_default_msi(&port->msi); in plda_pcie_host_init()
624 bridge->ops = ops; in plda_pcie_host_init()
625 bridge->sysdata = port; in plda_pcie_host_init()
638 if (port->host_ops && port->host_ops->host_deinit) in plda_pcie_host_init()
639 port->host_ops->host_deinit(port); in plda_pcie_host_init()
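/*
 * plda_pcie_host_init(): maps the bridge ("reg") and config regions, runs the
 * optional host_ops->host_init() hook, programs translation window 0 at
 * cfg_res->start for config space, applies the default MSI setup and
 * registers the host bridge (ops/sysdata); the error path unwinds through
 * host_ops->host_deinit().
 */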
647 pci_stop_root_bus(port->bridge->bus); in plda_pcie_host_deinit()
648 pci_remove_root_bus(port->bridge->bus); in plda_pcie_host_deinit()
652 if (port->host_ops && port->host_ops->host_deinit) in plda_pcie_host_deinit()
653 port->host_ops->host_deinit(port); in plda_pcie_host_deinit()
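Taken together, these fragments are the shared PLDA host glue consumed by SoC-specific front ends (the other drivers under drivers/pci/controller/plda/). A minimal caller sketch follows, assuming plda_pcie_host_init() and plda_pcie_host_deinit() take the root-port state plus a pci_ops table and a plda_event description, as the ops/bridge and event lines above suggest; every example_* name and the placeholder event numbers are illustrative, not taken from this file.

/* Hypothetical SoC front end; example_* identifiers are placeholders. */
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-plda.h"

static struct pci_ops example_pci_ops = {
	.map_bus	= plda_pcie_map_bus,	/* config accessor shown at the top */
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

static const struct plda_event example_events = {
	.intx_event	= 0,	/* placeholder: controller-specific event number */
	.msi_event	= 1,	/* placeholder: controller-specific event number */
};

static int example_pcie_probe(struct platform_device *pdev)
{
	struct plda_pcie_rp *port;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = &pdev->dev;
	platform_set_drvdata(pdev, port);

	/* Maps the bridge/config windows and registers the host bridge. */
	return plda_pcie_host_init(port, &example_pci_ops, &example_events);
}

static void example_pcie_remove(struct platform_device *pdev)
{
	struct plda_pcie_rp *port = platform_get_drvdata(pdev);

	/* Stops and removes the root bus, then runs host_ops->host_deinit(). */
	plda_pcie_host_deinit(port);
}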