// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pci-ats.h>

#include "../iommu-pages.h"
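
/*
 * Page request queue (PRQ) handling for the Intel VT-d IOMMU: queue
 * setup/teardown, the page request interrupt thread, pasid draining and
 * page group responses.
 */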
/**
 * intel_iommu_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Wait until all pending page requests for @pasid in the page fault queue
 * have been completed by the prq handling thread, then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	int head, tail;
	u16 sid, did;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;

	/*
	 * Wait until every request for this pasid that is already queued has
	 * been handled by the prq thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (req->rid != sid ||
		    (req->pasid_present && pasid != req->pasid) ||
		    (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}
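
	/*
	 * Nothing for this pasid remains in the software queue; what may be
	 * left now is in hardware. The drain below batches a fenced
	 * invalidation wait, a (PASID-)IOTLB invalidation and a device-TLB
	 * invalidation, submitted with the wait-drain option.
	 */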
	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) | QI_IWD_FENCE | QI_IWD_TYPE;
	if (pasid == IOMMU_NO_PASID) {
		qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
				  MAX_AGAW_PFN_WIDTH, &desc[2]);
	} else {
		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
					0, MAX_AGAW_PFN_WIDTH, &desc[2]);
	}

qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}
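
/*
 * Page requests carry an untranslated virtual address from the faulting
 * context; requests whose address is not canonical for the CPU are
 * rejected before being reported.
 */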
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long)addr;

	return (((saddr << shift) >> shift) == saddr);
}
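
/*
 * Log a malformed page request and, if it is the last request of a group
 * (lpig set), send an "invalid" page group response so the device does not
 * wait forever for an answer.
 */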
static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc = { };

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

	/* Only the last request in a group (lpig set) requires a response. */
	if (!req->lpig)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
		   QI_PGRP_DID(req->rid) |
		   QI_PGRP_PASID_P(req->pasid_present) |
		   QI_PGRP_RESP_CODE(result) |
		   QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
		   QI_PGRP_LPIG(req->lpig);

	qi_submit_sync(iommu, &desc, 1, 0);
}
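
/* Translate the access bits of a page request into IOMMU fault permission flags. */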
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}
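
/*
 * Convert a hardware page request descriptor into a generic iopf_fault and
 * hand it to the IOMMU core for routing to the faulting device's iopf queue.
 */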
static void intel_prq_report(struct intel_iommu *iommu, struct device *dev,
			     struct page_req_dsc *desc)
{
	struct iopf_fault event = { };

	/* Fill in event data for device specific processing */
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}

	iommu_report_device_fault(dev, &event);
}
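
/*
 * Threaded handler for the page request interrupt: walk the ring between
 * head and tail, validate each descriptor and report the good ones to the
 * owning device as I/O page faults.
 */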
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct page_req_dsc *req;
	int head, tail, handled;
	struct device *dev;
	u64 address;

	/* Clear the PPR bit first so the hardware can raise a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
bad_req:
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}

		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		/* Drop Stop Marker message. No need for a response. */
		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
			goto prq_advance;

		mutex_lock(&iommu->iopf_lock);
		dev = device_rbtree_find(iommu, req->rid);
		if (!dev) {
			mutex_unlock(&iommu->iopf_lock);
			goto bad_req;
		}

		intel_prq_report(iommu, dev, req);
		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
				 req->qw_2, req->qw_3,
				 iommu->prq_seq_number++);
		mutex_unlock(&iommu->iopf_lock);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}
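
	/*
	 * Everything up to the hardware tail has been consumed, so move the
	 * head there. If the queue overflowed while it was full, clear the
	 * overflow condition once the ring is empty again so the hardware
	 * can resume queueing page requests.
	 */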
	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}
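
/*
 * Allocate the page request queue and the per-IOMMU iopf queue, hook up the
 * page request interrupt, and program the queue address/head/tail registers.
 */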
int intel_iommu_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	int irq, ret;

	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
	if (!iommu->prq) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}

	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
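
	/*
	 * PQA takes the physical base address of the queue with its size
	 * order in the low bits; clearing PQH and PQT marks the ring empty.
	 */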
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}
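
/* Undo intel_iommu_enable_prq(): disable the queue and release its resources. */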
int intel_iommu_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
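
/*
 * Deliver a software page response back to the device: build a page group
 * response descriptor from the iommu_page_response message and submit it
 * through the invalidation queue.
 */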
void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			       struct iommu_page_response *msg)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	u8 bus = info->bus, devfn = info->devfn;
	struct iommu_fault_page_request *prm;
	struct qi_desc desc;
	bool pasid_present;
	bool last_page;
	u16 sid;

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
		   QI_PGRP_PASID_P(pasid_present) |
		   QI_PGRP_RESP_CODE(msg->code) |
		   QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
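
/*
 * Not part of this file: a minimal sketch of how the PRQ path above is
 * exercised from a device driver, assuming a PASID/PRI-capable device and
 * kernel SVA support. The driver binds an mm to the device through the
 * generic IOMMU SVA API; page requests from the device then land in the
 * PRQ and are reported by prq_event_thread(), and the eventual responses
 * are delivered through intel_iommu_page_response(). The function name
 * below is illustrative only.
 */
static int example_bind_mm_to_device(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	/* Typically the driver enables the SVA feature on the device first. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* Program the device to tag its DMA with this pasid, run the workload... */

	iommu_sva_unbind_device(handle);
	return 0;
}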