Lines matching "protection-domain"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for RISC-V IOMMU implementations.
5 * Copyright © 2022-2024 Rivos Inc.
6 * Copyright © 2023 FORTH-ICS/CARV
13 #define pr_fmt(fmt) "riscv-iommu: " fmt
23 #include "../iommu-pages.h"
24 #include "iommu-bits.h"
37 /* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */
38 #define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10))
39 #define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12))
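The phys_to_ppn()/ppn_to_phys() pair above moves a page-aligned physical address into and out of the 44-bit PPN register field at bits [53:10]. A minimal stand-alone sketch (not part of the driver) showing the round trip:

#include <stdint.h>
#include <stdio.h>

#define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10))
#define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12))

int main(void)
{
	uint64_t pa = 0x80042000ULL;	/* 4K-aligned physical address */
	uint64_t ppn = phys_to_ppn(pa);	/* PA bits [55:12] land in field bits [53:10] */

	/* Prints: pa=0x80042000 ppn=0x20010800 back=0x80042000 */
	printf("pa=0x%llx ppn=0x%llx back=0x%llx\n",
	       (unsigned long long)pa, (unsigned long long)ppn,
	       (unsigned long long)ppn_to_phys(ppn));
	return 0;
}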
46 #define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1)
48 /* Device resource-managed allocations */
58 iommu_free_pages(devres->addr, devres->order); in riscv_iommu_devres_pages_release()
66 return devres->addr == target->addr; in riscv_iommu_devres_pages_match()
74 addr = iommu_alloc_pages_node(dev_to_node(iommu->dev), in riscv_iommu_get_pages()
87 devres->addr = addr; in riscv_iommu_get_pages()
88 devres->order = order; in riscv_iommu_get_pages()
90 devres_add(iommu->dev, devres); in riscv_iommu_get_pages()
99 devres_release(iommu->dev, riscv_iommu_devres_pages_release, in riscv_iommu_free_pages()
110 _q->qid = RISCV_IOMMU_INTR_ ## name; \
111 _q->qbr = RISCV_IOMMU_REG_ ## name ## B; \
112 _q->qcr = RISCV_IOMMU_REG_ ## name ## CSR; \
113 _q->mask = _q->mask ?: (RISCV_IOMMU_DEF_ ## name ## _COUNT) - 1;\
117 #define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB))
118 #define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB))
119 #define Q_ITEM(q, index) ((q)->mask & (index))
120 #define Q_IPSR(q) BIT((q)->qid)
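Q_HEAD()/Q_TAIL() derive the head/tail register offsets from the queue's base register, and Q_ITEM() maps a free-running counter onto a power-of-two ring. A small stand-alone sketch (not part of the driver) of the mask-based wrap-around arithmetic the driver relies on:

#include <stdio.h>

#define Q_ITEM(mask, index) ((mask) & (index))

int main(void)
{
	const unsigned int mask = 511;		/* 512-entry ring: mask = 2^9 - 1 */
	unsigned int head = 0xfffffffeU;	/* consumer counter about to wrap */
	unsigned int tail = 3;			/* producer counter already wrapped */

	/* Ring slots stay in range and the distance survives the wrap. */
	printf("head slot %u, tail slot %u, pending %u\n",
	       Q_ITEM(mask, head), Q_ITEM(mask, tail), tail - head);
	return 0;
}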
123 * Discover queue ring buffer hardware configuration, allocate in-memory
127 * @queue - data structure, configured with RISCV_IOMMU_QUEUE_INIT()
128 * @entry_size - queue single element size in bytes.
141 riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD); in riscv_iommu_queue_alloc()
142 qb = riscv_iommu_readq(iommu, queue->qbr); in riscv_iommu_queue_alloc()
149 logsz = ilog2(queue->mask); in riscv_iommu_queue_alloc()
161 queue->phys = pfn_to_phys(FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb)); in riscv_iommu_queue_alloc()
162 queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size); in riscv_iommu_queue_alloc()
168 queue->base = riscv_iommu_get_pages(iommu, order); in riscv_iommu_queue_alloc()
169 queue->phys = __pa(queue->base); in riscv_iommu_queue_alloc()
170 } while (!queue->base && logsz-- > 0); in riscv_iommu_queue_alloc()
173 if (!queue->base) in riscv_iommu_queue_alloc()
174 return -ENOMEM; in riscv_iommu_queue_alloc()
176 qb = phys_to_ppn(queue->phys) | in riscv_iommu_queue_alloc()
180 riscv_iommu_writeq(iommu, queue->qbr, qb); in riscv_iommu_queue_alloc()
181 rb = riscv_iommu_readq(iommu, queue->qbr); in riscv_iommu_queue_alloc()
183 dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid); in riscv_iommu_queue_alloc()
184 return -ENODEV; in riscv_iommu_queue_alloc()
188 queue->mask = (2U << logsz) - 1; in riscv_iommu_queue_alloc()
190 dev_dbg(iommu->dev, "queue #%u allocated 2^%u entries", in riscv_iommu_queue_alloc()
191 queue->qid, logsz + 1); in riscv_iommu_queue_alloc()
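riscv_iommu_queue_alloc() negotiates the ring size: it probes the base register to learn the largest LOG2SZ the hardware supports, then allocates the ring and shrinks it until the allocation succeeds, deriving the final mask from LOG2SZ (the queue holds 2^(logsz + 1) entries). A simplified stand-alone sketch (not part of the driver) of that fallback loop, with malloc() standing in for iommu_alloc_pages_node():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int logsz = 14;	/* LOG2SZ reported by (pretend) hardware */
	size_t entry_size = 16;		/* e.g. one command queue record */
	void *base = NULL;

	do {
		base = malloc(entry_size << (logsz + 1));
	} while (!base && logsz-- > 0);

	if (!base)
		return 1;

	/* 2^(logsz + 1) entries; the mask is what Q_ITEM() wraps indexes with. */
	printf("ring entries %u, mask 0x%x\n", 2U << logsz, (2U << logsz) - 1);
	free(base);
	return 0;
}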
201 if (riscv_iommu_readl(queue->iommu, RISCV_IOMMU_REG_IPSR) & Q_IPSR(queue)) in riscv_iommu_queue_ipsr()
210 return (iommu->icvec >> (n * 4)) & RISCV_IOMMU_ICVEC_CIV; in riscv_iommu_queue_vec()
216 * @queue - data structure, already allocated with riscv_iommu_queue_alloc()
217 * @irq_handler - threaded interrupt handler.
223 const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)]; in riscv_iommu_queue_enable()
227 if (queue->iommu) in riscv_iommu_queue_enable()
228 return -EBUSY; in riscv_iommu_queue_enable()
232 return -ENODEV; in riscv_iommu_queue_enable()
234 queue->iommu = iommu; in riscv_iommu_queue_enable()
237 dev_name(iommu->dev), queue); in riscv_iommu_queue_enable()
239 queue->iommu = NULL; in riscv_iommu_queue_enable()
244 if (queue->qid == RISCV_IOMMU_INTR_CQ) in riscv_iommu_queue_enable()
245 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0); in riscv_iommu_queue_enable()
247 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0); in riscv_iommu_queue_enable()
255 riscv_iommu_writel(iommu, queue->qcr, in riscv_iommu_queue_enable()
260 riscv_iommu_readl_timeout(iommu, queue->qcr, in riscv_iommu_queue_enable()
268 riscv_iommu_writel(iommu, queue->qcr, 0); in riscv_iommu_queue_enable()
270 queue->iommu = NULL; in riscv_iommu_queue_enable()
271 dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid); in riscv_iommu_queue_enable()
272 return -EBUSY; in riscv_iommu_queue_enable()
287 struct riscv_iommu_device *iommu = queue->iommu; in riscv_iommu_queue_disable()
293 free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue); in riscv_iommu_queue_disable()
294 riscv_iommu_writel(iommu, queue->qcr, 0); in riscv_iommu_queue_disable()
295 riscv_iommu_readl_timeout(iommu, queue->qcr, in riscv_iommu_queue_disable()
300 dev_err(iommu->dev, "failed to disable hardware queue #%u, csr 0x%x\n", in riscv_iommu_queue_disable()
301 queue->qid, csr); in riscv_iommu_queue_disable()
303 queue->iommu = NULL; in riscv_iommu_queue_disable()
313 unsigned int head = atomic_read(&queue->head); in riscv_iommu_queue_consume()
314 unsigned int tail = atomic_read(&queue->tail); in riscv_iommu_queue_consume()
316 int available = (int)(tail - head); in riscv_iommu_queue_consume()
324 if (riscv_iommu_readl_timeout(queue->iommu, Q_TAIL(queue), in riscv_iommu_queue_consume()
325 tail, (tail & ~queue->mask) == 0, in riscv_iommu_queue_consume()
327 dev_err_once(queue->iommu->dev, in riscv_iommu_queue_consume()
336 return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head); in riscv_iommu_queue_consume()
344 const unsigned int head = atomic_add_return(count, &queue->head); in riscv_iommu_queue_release()
346 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), Q_ITEM(queue, head)); in riscv_iommu_queue_release()
352 const unsigned int cons = atomic_read(&queue->head); in riscv_iommu_queue_cons()
356 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, in riscv_iommu_queue_cons()
357 !(head & ~queue->mask), in riscv_iommu_queue_cons()
361 return cons + ((head - last) & queue->mask); in riscv_iommu_queue_cons()
369 unsigned int cons = atomic_read(&queue->head); in riscv_iommu_queue_wait()
372 if ((int)(cons - index) > 0) in riscv_iommu_queue_wait()
377 (int)(cons - index) > 0, 0, timeout_us); in riscv_iommu_queue_wait()
398 prod = atomic_inc_return(&queue->prod) - 1; in riscv_iommu_queue_send()
399 head = atomic_read(&queue->head); in riscv_iommu_queue_send()
402 if ((prod - head) > queue->mask) { in riscv_iommu_queue_send()
403 if (readx_poll_timeout(atomic_read, &queue->head, in riscv_iommu_queue_send()
404 head, (prod - head) < queue->mask, in riscv_iommu_queue_send()
407 } else if ((prod - head) == queue->mask) { in riscv_iommu_queue_send()
410 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head, in riscv_iommu_queue_send()
411 !(head & ~queue->mask) && head != last, in riscv_iommu_queue_send()
414 atomic_add((head - last) & queue->mask, &queue->head); in riscv_iommu_queue_send()
418 memcpy(queue->base + Q_ITEM(queue, prod) * entry_size, entry, entry_size); in riscv_iommu_queue_send()
421 if (readx_poll_timeout(atomic_read, &queue->tail, tail, prod == tail, in riscv_iommu_queue_send()
431 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), Q_ITEM(queue, prod + 1)); in riscv_iommu_queue_send()
438 atomic_inc(&queue->tail); in riscv_iommu_queue_send()
447 dev_err_once(queue->iommu->dev, "Hardware error: command enqueue failed\n"); in riscv_iommu_queue_send()
463 ctrl = riscv_iommu_readl(queue->iommu, queue->qcr); in riscv_iommu_cmdq_process()
466 riscv_iommu_writel(queue->iommu, queue->qcr, ctrl); in riscv_iommu_cmdq_process()
467 dev_warn(queue->iommu->dev, in riscv_iommu_cmdq_process()
469 queue->qid, in riscv_iommu_cmdq_process()
479 riscv_iommu_writel(queue->iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue)); in riscv_iommu_cmdq_process()
488 riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd)); in riscv_iommu_cmd_send()
499 prod = riscv_iommu_queue_send(&iommu->cmdq, &cmd, sizeof(cmd)); in riscv_iommu_cmd_sync()
504 if (riscv_iommu_queue_wait(&iommu->cmdq, prod, timeout_us)) in riscv_iommu_cmd_sync()
505 dev_err_once(iommu->dev, in riscv_iommu_cmd_sync()
516 unsigned int err = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, event->hdr); in riscv_iommu_fault()
517 unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, event->hdr); in riscv_iommu_fault()
521 dev_warn_ratelimited(iommu->dev, in riscv_iommu_fault()
523 err, devid, event->iotval, event->iotval2); in riscv_iommu_fault()
530 struct riscv_iommu_device *iommu = queue->iommu; in riscv_iommu_fltq_process()
535 events = (struct riscv_iommu_fq_record *)queue->base; in riscv_iommu_fltq_process()
548 ctrl = riscv_iommu_readl(iommu, queue->qcr); in riscv_iommu_fltq_process()
550 riscv_iommu_writel(iommu, queue->qcr, ctrl); in riscv_iommu_fltq_process()
551 dev_warn(iommu->dev, in riscv_iommu_fltq_process()
553 queue->qid, in riscv_iommu_fltq_process()
565 const bool base_format = !(iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT); in riscv_iommu_get_dc()
573 if (iommu->ddt_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL || in riscv_iommu_get_dc()
574 iommu->ddt_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL) in riscv_iommu_get_dc()
579 * DDI[0]: bits 0 - 6 (1st level) (7 bits) in riscv_iommu_get_dc()
580 * DDI[1]: bits 7 - 15 (2nd level) (9 bits) in riscv_iommu_get_dc()
581 * DDI[2]: bits 16 - 23 (3rd level) (8 bits) in riscv_iommu_get_dc()
584 * DDI[0]: bits 0 - 5 (1st level) (6 bits) in riscv_iommu_get_dc()
585 * DDI[1]: bits 6 - 14 (2nd level) (9 bits) in riscv_iommu_get_dc()
586 * DDI[2]: bits 15 - 23 (3rd level) (9 bits) in riscv_iommu_get_dc()
599 depth = iommu->ddt_mode - RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL; in riscv_iommu_get_dc()
603 /* Get to the level of the non-leaf node that holds the device context */ in riscv_iommu_get_dc()
604 for (ddtp = iommu->ddt_root; depth-- > 0;) { in riscv_iommu_get_dc()
607 * Each non-leaf node is 64bits wide and on each level in riscv_iommu_get_dc()
635 /* Race setting DDT detected, re-read and retry. */ in riscv_iommu_get_dc()
643 * is 8 * 64bits, hence the (3 - base_format) below. in riscv_iommu_get_dc()
645 ddtp += (devid & ((64 << base_format) - 1)) << (3 - base_format); in riscv_iommu_get_dc()
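riscv_iommu_get_dc() walks the device directory using the DDI[] indexes laid out in the comment above; the leaf stride is 8 u64 words for the extended format and 4 for the base format, hence the (3 - base_format) shift. A stand-alone sketch (not part of the driver) of how a 24-bit device ID splits into the three indexes for both formats:

#include <stdint.h>
#include <stdio.h>

static void split(uint32_t devid, int base_format)
{
	/* Bits per level: { DDI[0], DDI[1], DDI[2] } */
	const unsigned int ddi_bits[2][3] = {
		{ 6, 9, 9 },	/* extended format, 64-byte device contexts */
		{ 7, 9, 8 },	/* base format, 32-byte device contexts */
	};
	unsigned int shift = 0;
	int i;

	printf("%s format:", base_format ? "base" : "extended");
	for (i = 0; i < 3; i++) {
		unsigned int bits = ddi_bits[base_format][i];

		printf(" DDI[%d]=0x%x", i, (devid >> shift) & ((1U << bits) - 1));
		shift += bits;
	}
	printf("\n");
}

int main(void)
{
	split(0x00abcdef & 0xffffff, 1);
	split(0x00abcdef & 0xffffff, 0);
	return 0;
}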
678 return -EBUSY; in riscv_iommu_iodir_alloc()
692 return -EBUSY; in riscv_iommu_iodir_alloc()
694 iommu->ddt_phys = ppn_to_phys(ddtp); in riscv_iommu_iodir_alloc()
695 if (iommu->ddt_phys) in riscv_iommu_iodir_alloc()
696 iommu->ddt_root = devm_ioremap(iommu->dev, in riscv_iommu_iodir_alloc()
697 iommu->ddt_phys, PAGE_SIZE); in riscv_iommu_iodir_alloc()
698 if (iommu->ddt_root) in riscv_iommu_iodir_alloc()
699 memset(iommu->ddt_root, 0, PAGE_SIZE); in riscv_iommu_iodir_alloc()
702 if (!iommu->ddt_root) { in riscv_iommu_iodir_alloc()
703 iommu->ddt_root = riscv_iommu_get_pages(iommu, 0); in riscv_iommu_iodir_alloc()
704 iommu->ddt_phys = __pa(iommu->ddt_root); in riscv_iommu_iodir_alloc()
707 if (!iommu->ddt_root) in riscv_iommu_iodir_alloc()
708 return -ENOMEM; in riscv_iommu_iodir_alloc()
716 * Accepted iommu->ddt_mode is updated on success.
721 struct device *dev = iommu->dev; in riscv_iommu_iodir_set_mode()
728 return -EBUSY; in riscv_iommu_iodir_set_mode()
736 return -EINVAL; in riscv_iommu_iodir_set_mode()
741 rq_ddtp |= phys_to_ppn(iommu->ddt_phys); in riscv_iommu_iodir_set_mode()
748 return -EBUSY; in riscv_iommu_iodir_set_mode()
761 return -EINVAL; in riscv_iommu_iodir_set_mode()
774 rq_mode--; in riscv_iommu_iodir_set_mode()
785 return -EINVAL; in riscv_iommu_iodir_set_mode()
788 iommu->ddt_mode = mode; in riscv_iommu_iodir_set_mode()
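riscv_iommu_iodir_set_mode() requests a DDTP mode, reads back what the hardware accepted, and steps rq_mode down until a supported mode sticks; only then is iommu->ddt_mode updated. A simplified stand-alone sketch (not part of the driver) of that fallback idea, where hw_accepts() is a hypothetical stand-in for the DDTP write/read-back:

#include <stdio.h>

enum { DDTP_OFF, DDTP_BARE, DDTP_1LVL, DDTP_2LVL, DDTP_3LVL };

/* Pretend the hardware caps the directory depth at two levels. */
static int hw_accepts(int mode)
{
	return mode > DDTP_2LVL ? DDTP_2LVL : mode;
}

int main(void)
{
	int rq_mode = DDTP_3LVL, mode;

	do {
		mode = hw_accepts(rq_mode);
		if (mode == rq_mode)
			break;		/* accepted as requested */
		rq_mode--;		/* retry with a shallower mode */
	} while (rq_mode > DDTP_BARE);

	printf("ddt_mode accepted by hardware: %d\n", mode);
	return 0;
}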
806 /* This struct contains protection domain specific IOMMU driver data. */
808 struct iommu_domain domain; member
819 container_of(iommu_domain, struct riscv_iommu_domain, domain)
823 struct riscv_iommu_domain *domain; member
829 * Protection domain requiring IOATC and DevATC translation cache invalidations,
831 * Devices should be linked to the domain before first use and unlinked after
832 * the translations from the referenced protection domain can no longer be used.
835 * is disabled for those protection domains.
848 static int riscv_iommu_bond_link(struct riscv_iommu_domain *domain, in riscv_iommu_bond_link() argument
857 return -ENOMEM; in riscv_iommu_bond_link()
858 bond->dev = dev; in riscv_iommu_bond_link()
861 * List of devices attached to the domain is arranged based on in riscv_iommu_bond_link()
865 spin_lock(&domain->lock); in riscv_iommu_bond_link()
866 list_for_each(bonds, &domain->bonds) in riscv_iommu_bond_link()
867 if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu) in riscv_iommu_bond_link()
869 list_add_rcu(&bond->list, bonds); in riscv_iommu_bond_link()
870 spin_unlock(&domain->lock); in riscv_iommu_bond_link()
878 static void riscv_iommu_bond_unlink(struct riscv_iommu_domain *domain, in riscv_iommu_bond_unlink() argument
886 if (!domain) in riscv_iommu_bond_unlink()
889 spin_lock(&domain->lock); in riscv_iommu_bond_unlink()
890 list_for_each_entry(bond, &domain->bonds, list) { in riscv_iommu_bond_unlink()
893 else if (bond->dev == dev) in riscv_iommu_bond_unlink()
895 else if (dev_to_iommu(bond->dev) == iommu) in riscv_iommu_bond_unlink()
899 list_del_rcu(&found->list); in riscv_iommu_bond_unlink()
900 spin_unlock(&domain->lock); in riscv_iommu_bond_unlink()
904 * If this was the last bond between this domain and the IOMMU in riscv_iommu_bond_unlink()
905 * invalidate all cached entries for domain's PSCID. in riscv_iommu_bond_unlink()
909 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); in riscv_iommu_bond_unlink()
919 * the hardware, when RISC-V IOMMU architecture specification update for
924 static void riscv_iommu_iotlb_inval(struct riscv_iommu_domain *domain, in riscv_iommu_iotlb_inval() argument
930 unsigned long len = end - start + 1; in riscv_iommu_iotlb_inval()
934 * For each IOMMU linked with this protection domain (via bonds->dev), in riscv_iommu_iotlb_inval()
937 * Possible race with domain attach flow is handled by sequencing in riscv_iommu_iotlb_inval()
938 * bond creation - riscv_iommu_bond_link(), and device directory in riscv_iommu_iotlb_inval()
939 * update - riscv_iommu_iodir_update(). in riscv_iommu_iotlb_inval()
942 * -------------------------- -------------------------- in riscv_iommu_iotlb_inval()
952 * linked to the protection domain it will receive invalidation in riscv_iommu_iotlb_inval()
960 list_for_each_entry_rcu(bond, &domain->bonds, list) { in riscv_iommu_iotlb_inval()
961 iommu = dev_to_iommu(bond->dev); in riscv_iommu_iotlb_inval()
965 * to the IOMMU for the same PSCID, and with domain->bonds list in riscv_iommu_iotlb_inval()
973 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid); in riscv_iommu_iotlb_inval()
986 list_for_each_entry_rcu(bond, &domain->bonds, list) { in riscv_iommu_iotlb_inval()
987 iommu = dev_to_iommu(bond->dev); in riscv_iommu_iotlb_inval()
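riscv_iommu_bond_link() keeps the bond list grouped by owning IOMMU precisely so the invalidation walk above can skip consecutive devices behind the same IOMMU and issue a single invalidation per IOMMU for the domain's PSCID. A stand-alone sketch (not part of the driver) of that deduplication over a grouped list:

#include <stdio.h>

struct bond {
	const char *dev;
	int iommu;	/* stand-in for dev_to_iommu(bond->dev) */
};

int main(void)
{
	/* Grouped by IOMMU, the way riscv_iommu_bond_link() inserts entries. */
	const struct bond bonds[] = {
		{ "dev-a", 0 }, { "dev-b", 0 }, { "dev-c", 1 }, { "dev-d", 1 },
	};
	int prev = -1;
	unsigned int i;

	for (i = 0; i < sizeof(bonds) / sizeof(bonds[0]); i++) {
		if (bonds[i].iommu == prev)
			continue;	/* same IOMMU: invalidation already sent */
		prev = bonds[i].iommu;
		printf("IOTINVAL.VMA(PSCID) -> iommu %d\n", prev);
	}
	return 0;
}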
1021 for (i = 0; i < fwspec->num_ids; i++) { in riscv_iommu_iodir_update()
1022 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); in riscv_iommu_iodir_update()
1023 tc = READ_ONCE(dc->tc); in riscv_iommu_iodir_update()
1027 WRITE_ONCE(dc->tc, tc & ~RISCV_IOMMU_DC_TC_V); in riscv_iommu_iodir_update()
1031 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); in riscv_iommu_iodir_update()
1043 for (i = 0; i < fwspec->num_ids; i++) { in riscv_iommu_iodir_update()
1044 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); in riscv_iommu_iodir_update()
1045 tc = READ_ONCE(dc->tc); in riscv_iommu_iodir_update()
1048 WRITE_ONCE(dc->fsc, fsc); in riscv_iommu_iodir_update()
1049 WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID); in riscv_iommu_iodir_update()
1052 WRITE_ONCE(dc->tc, tc); in riscv_iommu_iodir_update()
1056 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]); in riscv_iommu_iodir_update()
1069 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_iotlb_flush_all() local
1071 riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); in riscv_iommu_iotlb_flush_all()
1077 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_iotlb_sync() local
1079 riscv_iommu_iotlb_inval(domain, gather->start, gather->end); in riscv_iommu_iotlb_sync()
1082 #define PT_SHIFT (PAGE_SHIFT - ilog2(sizeof(pte_t)))
1089 static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain, in riscv_iommu_pte_free() argument
1104 riscv_iommu_pte_free(domain, pte, freelist); in riscv_iommu_pte_free()
1108 list_add_tail(&virt_to_page(ptr)->lru, freelist); in riscv_iommu_pte_free()
1113 static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain, in riscv_iommu_pte_alloc() argument
1117 unsigned long *ptr = domain->pgd_root; in riscv_iommu_pte_alloc()
1119 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; in riscv_iommu_pte_alloc()
1125 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); in riscv_iommu_pte_alloc()
1127 * Note: returned entry might be a non-leaf if there was in riscv_iommu_pte_alloc()
1143 * Non-leaf entry is missing, allocate and try to add to the in riscv_iommu_pte_alloc()
1147 addr = iommu_alloc_page_node(domain->numa_node, gfp); in riscv_iommu_pte_alloc()
1158 } while (level-- > 0); in riscv_iommu_pte_alloc()
1163 static unsigned long *riscv_iommu_pte_fetch(struct riscv_iommu_domain *domain, in riscv_iommu_pte_fetch() argument
1166 unsigned long *ptr = domain->pgd_root; in riscv_iommu_pte_fetch()
1168 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2; in riscv_iommu_pte_fetch()
1173 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1)); in riscv_iommu_pte_fetch()
1182 } while (level-- > 0); in riscv_iommu_pte_fetch()
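Both riscv_iommu_pte_alloc() and riscv_iommu_pte_fetch() derive the starting level from pgd_mode (Sv39 starts at level 2, Sv48 at 3, Sv57 at 4) and consume PT_SHIFT bits of the IOVA per level above the 12-bit page offset. A stand-alone sketch (not part of the driver) of that index arithmetic for an Sv48 walk:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PT_SHIFT	9	/* PAGE_SHIFT - ilog2(sizeof(pte_t)) with 8-byte PTEs */
#define PTRS_PER_PTE	(1UL << PT_SHIFT)

int main(void)
{
	unsigned long long iova = 0x123456789000ULL;
	int level = 3;		/* Sv48: pgd_mode - SV39 + 2 = 1 + 2 */

	do {
		unsigned int shift = PAGE_SHIFT + PT_SHIFT * level;

		printf("level %d: shift %u, index %llu\n", level, shift,
		       (iova >> shift) & (PTRS_PER_PTE - 1));
	} while (level-- > 0);

	return 0;
}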
1192 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_map_pages() local
1201 else if (domain->amo_enabled) in riscv_iommu_map_pages()
1207 ptr = riscv_iommu_pte_alloc(domain, iova, pgsize, gfp); in riscv_iommu_map_pages()
1209 rc = -ENOMEM; in riscv_iommu_map_pages()
1218 riscv_iommu_pte_free(domain, old, &freelist); in riscv_iommu_map_pages()
1223 --pgcount; in riscv_iommu_map_pages()
1231 * invalidate all levels of page table (i.e. leaf and non-leaf) in riscv_iommu_map_pages()
1232 * is an invalidate-all-PSCID IOTINVAL.VMA with AV=0. in riscv_iommu_map_pages()
1234 * capability.NL (non-leaf) IOTINVAL command. in riscv_iommu_map_pages()
1236 riscv_iommu_iotlb_inval(domain, 0, ULONG_MAX); in riscv_iommu_map_pages()
1248 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_unmap_pages() local
1255 ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); in riscv_iommu_unmap_pages()
1260 if (iova & (pte_size - 1)) in riscv_iommu_unmap_pages()
1267 iommu_iotlb_gather_add_page(&domain->domain, gather, iova, in riscv_iommu_unmap_pages()
1280 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_iova_to_phys() local
1284 ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); in riscv_iommu_iova_to_phys()
1288 return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); in riscv_iommu_iova_to_phys()
1293 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_free_paging_domain() local
1294 const unsigned long pfn = virt_to_pfn(domain->pgd_root); in riscv_iommu_free_paging_domain()
1296 WARN_ON(!list_empty(&domain->bonds)); in riscv_iommu_free_paging_domain()
1298 if ((int)domain->pscid > 0) in riscv_iommu_free_paging_domain()
1299 ida_free(&riscv_iommu_pscids, domain->pscid); in riscv_iommu_free_paging_domain()
1301 riscv_iommu_pte_free(domain, _io_pte_entry(pfn, _PAGE_TABLE), NULL); in riscv_iommu_free_paging_domain()
1302 kfree(domain); in riscv_iommu_free_paging_domain()
1309 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39; in riscv_iommu_pt_supported()
1312 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48; in riscv_iommu_pt_supported()
1315 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57; in riscv_iommu_pt_supported()
1323 struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); in riscv_iommu_attach_paging_domain() local
1328 if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode)) in riscv_iommu_attach_paging_domain()
1329 return -ENODEV; in riscv_iommu_attach_paging_domain()
1331 fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | in riscv_iommu_attach_paging_domain()
1332 FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)); in riscv_iommu_attach_paging_domain()
1333 ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | in riscv_iommu_attach_paging_domain()
1336 if (riscv_iommu_bond_link(domain, dev)) in riscv_iommu_attach_paging_domain()
1337 return -ENOMEM; in riscv_iommu_attach_paging_domain()
1340 riscv_iommu_bond_unlink(info->domain, dev); in riscv_iommu_attach_paging_domain()
1341 info->domain = domain; in riscv_iommu_attach_paging_domain()
1358 struct riscv_iommu_domain *domain; in riscv_iommu_alloc_paging_domain() local
1365 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57) { in riscv_iommu_alloc_paging_domain()
1368 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48) { in riscv_iommu_alloc_paging_domain()
1371 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39) { in riscv_iommu_alloc_paging_domain()
1376 return ERR_PTR(-ENODEV); in riscv_iommu_alloc_paging_domain()
1379 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in riscv_iommu_alloc_paging_domain()
1380 if (!domain) in riscv_iommu_alloc_paging_domain()
1381 return ERR_PTR(-ENOMEM); in riscv_iommu_alloc_paging_domain()
1383 INIT_LIST_HEAD_RCU(&domain->bonds); in riscv_iommu_alloc_paging_domain()
1384 spin_lock_init(&domain->lock); in riscv_iommu_alloc_paging_domain()
1385 domain->numa_node = dev_to_node(iommu->dev); in riscv_iommu_alloc_paging_domain()
1386 domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD); in riscv_iommu_alloc_paging_domain()
1387 domain->pgd_mode = pgd_mode; in riscv_iommu_alloc_paging_domain()
1388 domain->pgd_root = iommu_alloc_page_node(domain->numa_node, in riscv_iommu_alloc_paging_domain()
1390 if (!domain->pgd_root) { in riscv_iommu_alloc_paging_domain()
1391 kfree(domain); in riscv_iommu_alloc_paging_domain()
1392 return ERR_PTR(-ENOMEM); in riscv_iommu_alloc_paging_domain()
1395 domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1, in riscv_iommu_alloc_paging_domain()
1397 if (domain->pscid < 0) { in riscv_iommu_alloc_paging_domain()
1398 iommu_free_page(domain->pgd_root); in riscv_iommu_alloc_paging_domain()
1399 kfree(domain); in riscv_iommu_alloc_paging_domain()
1400 return ERR_PTR(-ENOMEM); in riscv_iommu_alloc_paging_domain()
1404 * Note: RISC-V Privileged spec mandates that virtual addresses in riscv_iommu_alloc_paging_domain()
1405 * need to be sign-extended, so if (VA_BITS - 1) is set, all in riscv_iommu_alloc_paging_domain()
1411 * limit the available virtual addresses to VA_BITS - 1. in riscv_iommu_alloc_paging_domain()
1413 va_mask = DMA_BIT_MASK(va_bits - 1); in riscv_iommu_alloc_paging_domain()
1415 domain->domain.geometry.aperture_start = 0; in riscv_iommu_alloc_paging_domain()
1416 domain->domain.geometry.aperture_end = va_mask; in riscv_iommu_alloc_paging_domain()
1417 domain->domain.geometry.force_aperture = true; in riscv_iommu_alloc_paging_domain()
1418 domain->domain.pgsize_bitmap = va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G); in riscv_iommu_alloc_paging_domain()
1420 domain->domain.ops = &riscv_iommu_paging_domain_ops; in riscv_iommu_alloc_paging_domain()
1422 return &domain->domain; in riscv_iommu_alloc_paging_domain()
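Because RISC-V requires virtual addresses to be sign-extended, any IOVA with bit (va_bits - 1) set would also need all higher bits set, so the domain aperture above is capped at DMA_BIT_MASK(va_bits - 1). A stand-alone sketch (not part of the driver) of the resulting aperture per paging mode:

#include <stdio.h>

/* Simplified: the kernel macro special-cases n == 64, which is not needed here. */
#define DMA_BIT_MASK(n)	((1ULL << (n)) - 1)

int main(void)
{
	const struct { const char *mode; unsigned int va_bits; } modes[] = {
		{ "Sv39", 39 }, { "Sv48", 48 }, { "Sv57", 57 },
	};
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%s: aperture 0x0 .. 0x%llx\n", modes[i].mode,
		       (unsigned long long)DMA_BIT_MASK(modes[i].va_bits - 1));
	return 0;
}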
1433 riscv_iommu_bond_unlink(info->domain, dev); in riscv_iommu_attach_blocking_domain()
1434 info->domain = NULL; in riscv_iommu_attach_blocking_domain()
1453 riscv_iommu_bond_unlink(info->domain, dev); in riscv_iommu_attach_identity_domain()
1454 info->domain = NULL; in riscv_iommu_attach_identity_domain()
1475 return iommu_fwspec_add_ids(dev, args->args, 1); in riscv_iommu_of_xlate()
1487 if (!fwspec || !fwspec->iommu_fwnode->dev || !fwspec->num_ids) in riscv_iommu_probe_device()
1488 return ERR_PTR(-ENODEV); in riscv_iommu_probe_device()
1490 iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev); in riscv_iommu_probe_device()
1492 return ERR_PTR(-ENODEV); in riscv_iommu_probe_device()
1495 * IOMMU hardware operating in fail-over BARE mode will provide in riscv_iommu_probe_device()
1498 if (iommu->ddt_mode <= RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) in riscv_iommu_probe_device()
1499 return ERR_PTR(-ENODEV); in riscv_iommu_probe_device()
1503 return ERR_PTR(-ENOMEM); in riscv_iommu_probe_device()
1505 * Allocate and pre-configure device context entries in in riscv_iommu_probe_device()
1509 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD) in riscv_iommu_probe_device()
1511 for (i = 0; i < fwspec->num_ids; i++) { in riscv_iommu_probe_device()
1512 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); in riscv_iommu_probe_device()
1515 return ERR_PTR(-ENODEV); in riscv_iommu_probe_device()
1517 if (READ_ONCE(dc->tc) & RISCV_IOMMU_DC_TC_V) in riscv_iommu_probe_device()
1519 WRITE_ONCE(dc->tc, tc); in riscv_iommu_probe_device()
1524 return &iommu->iommu; in riscv_iommu_probe_device()
1551 * Make sure the IOMMU is switched off or in pass-through mode during in riscv_iommu_init_check()
1557 return -EBUSY; in riscv_iommu_init_check()
1562 return -EBUSY; in riscv_iommu_init_check()
1566 /* Configure accesses to in-memory data structures for CPU-native byte order. */ in riscv_iommu_init_check()
1568 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) { in riscv_iommu_init_check()
1569 if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_END)) in riscv_iommu_init_check()
1570 return -EINVAL; in riscv_iommu_init_check()
1572 iommu->fctl ^ RISCV_IOMMU_FCTL_BE); in riscv_iommu_init_check()
1573 iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); in riscv_iommu_init_check()
1575 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) in riscv_iommu_init_check()
1576 return -EINVAL; in riscv_iommu_init_check()
1583 if (!iommu->irqs_count) in riscv_iommu_init_check()
1584 return -EINVAL; in riscv_iommu_init_check()
1586 iommu->icvec = FIELD_PREP(RISCV_IOMMU_ICVEC_FIV, 1 % iommu->irqs_count) | in riscv_iommu_init_check()
1587 FIELD_PREP(RISCV_IOMMU_ICVEC_PIV, 2 % iommu->irqs_count) | in riscv_iommu_init_check()
1588 FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count); in riscv_iommu_init_check()
1589 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec); in riscv_iommu_init_check()
1590 iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC); in riscv_iommu_init_check()
1591 if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec), in riscv_iommu_init_check()
1592 FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)), in riscv_iommu_init_check()
1593 max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec), in riscv_iommu_init_check()
1594 FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count) in riscv_iommu_init_check()
1595 return -EINVAL; in riscv_iommu_init_check()
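riscv_iommu_init_check() spreads the four interrupt sources across however many IRQ lines exist by assigning source number modulo irqs_count into the 4-bit ICVEC fields, and riscv_iommu_queue_vec() later recovers a queue's line with (icvec >> (n * 4)) masked to 4 bits. A stand-alone sketch (not part of the driver) of that packing for a two-IRQ configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int irqs_count = 2;	/* e.g. only two wired interrupts */
	uint64_t icvec = 0;
	unsigned int n;

	/* Pack one 4-bit vector index per source: 0, 1, 2, 3 modulo irqs_count. */
	for (n = 0; n < 4; n++)
		icvec |= (uint64_t)(n % irqs_count) << (n * 4);

	for (n = 0; n < 4; n++)
		printf("source %u -> irq line %u\n", n,
		       (unsigned int)((icvec >> (n * 4)) & 0xf));
	return 0;
}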
1602 iommu_device_unregister(&iommu->iommu); in riscv_iommu_remove()
1603 iommu_device_sysfs_remove(&iommu->iommu); in riscv_iommu_remove()
1605 riscv_iommu_queue_disable(&iommu->cmdq); in riscv_iommu_remove()
1606 riscv_iommu_queue_disable(&iommu->fltq); in riscv_iommu_remove()
1613 RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ); in riscv_iommu_init()
1614 RISCV_IOMMU_QUEUE_INIT(&iommu->fltq, FQ); in riscv_iommu_init()
1618 return dev_err_probe(iommu->dev, rc, "unexpected device state\n"); in riscv_iommu_init()
1624 rc = riscv_iommu_queue_alloc(iommu, &iommu->cmdq, in riscv_iommu_init()
1629 rc = riscv_iommu_queue_alloc(iommu, &iommu->fltq, in riscv_iommu_init()
1634 rc = riscv_iommu_queue_enable(iommu, &iommu->cmdq, riscv_iommu_cmdq_process); in riscv_iommu_init()
1638 rc = riscv_iommu_queue_enable(iommu, &iommu->fltq, riscv_iommu_fltq_process); in riscv_iommu_init()
1646 rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s", in riscv_iommu_init()
1647 dev_name(iommu->dev)); in riscv_iommu_init()
1649 dev_err_probe(iommu->dev, rc, "cannot register sysfs interface\n"); in riscv_iommu_init()
1653 rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev); in riscv_iommu_init()
1655 dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n"); in riscv_iommu_init()
1662 iommu_device_sysfs_remove(&iommu->iommu); in riscv_iommu_init()
1666 riscv_iommu_queue_disable(&iommu->fltq); in riscv_iommu_init()
1667 riscv_iommu_queue_disable(&iommu->cmdq); in riscv_iommu_init()