Lines matching full:iommu (every line that references the identifier 'iommu'):
5 * Originally split from drivers/iommu/intel/svm.c
11 #include "iommu.h"
13 #include "../iommu-pages.h"
64 struct intel_iommu *iommu; in intel_iommu_drain_pasid_prq() local
73 iommu = info->iommu; in intel_iommu_drain_pasid_prq()
76 did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID; in intel_iommu_drain_pasid_prq()
83 reinit_completion(&iommu->prq_complete); in intel_iommu_drain_pasid_prq()
84 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_iommu_drain_pasid_prq()
85 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_iommu_drain_pasid_prq()
89 req = &iommu->prq[head / sizeof(*req)]; in intel_iommu_drain_pasid_prq()
97 wait_for_completion(&iommu->prq_complete); in intel_iommu_drain_pasid_prq()
112 qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]); in intel_iommu_drain_pasid_prq()
121 reinit_completion(&iommu->prq_complete); in intel_iommu_drain_pasid_prq()
122 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_iommu_drain_pasid_prq()
123 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_iommu_drain_pasid_prq()
124 wait_for_completion(&iommu->prq_complete); in intel_iommu_drain_pasid_prq()
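The lines above are from intel_iommu_drain_pasid_prq(), which runs before a PASID is torn down: it snapshots the PQH/PQT registers, scans the still-pending descriptors, sleeps on prq_complete and rescans while a request for that PASID is in flight, then queues flush descriptors with QI_OPT_WAIT_DRAIN and, if the overflow bit is still set, waits once more. The sketch below is a self-contained user-space model of just the scan step; the descriptor layout and the name pasid_requests_pending() are invented for illustration and are not the VT-d format.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct prq_desc {			/* hypothetical flat layout, not the hardware format */
	uint32_t pasid;			/* process address space ID */
	uint16_t rid;			/* requester ID (bus/devfn) */
	uint8_t  pad[26];		/* pad the slot out to 32 bytes */
};

/* head/tail are byte offsets into the ring, as read from PQH/PQT */
static bool pasid_requests_pending(const struct prq_desc *ring, size_t ring_bytes,
				   size_t head, size_t tail,
				   uint16_t rid, uint32_t pasid)
{
	while (head != tail) {
		const struct prq_desc *req = &ring[head / sizeof(*req)];

		if (req->rid == rid && req->pasid == pasid)
			return true;	/* caller sleeps on prq_complete and rescans */
		head = (head + sizeof(*req)) % ring_bytes;
	}
	return false;			/* nothing left in flight for this PASID */
}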
137 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
143 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
157 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
176 static void intel_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_prq_report() argument
200 struct intel_iommu *iommu = d; in prq_event_thread() local
210 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
212 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
213 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
216 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
220 pr_err("IOMMU: %s: Address is not canonical\n", in prq_event_thread()
221 iommu->name); in prq_event_thread()
223 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
228 pr_err("IOMMU: %s: Page request in Privilege Mode\n", in prq_event_thread()
229 iommu->name); in prq_event_thread()
234 pr_err("IOMMU: %s: Execution request not supported\n", in prq_event_thread()
235 iommu->name); in prq_event_thread()
244 * If prq is to be handled outside iommu driver via receiver of in prq_event_thread()
247 mutex_lock(&iommu->iopf_lock); in prq_event_thread()
248 dev = device_rbtree_find(iommu, req->rid); in prq_event_thread()
250 mutex_unlock(&iommu->iopf_lock); in prq_event_thread()
254 intel_prq_report(iommu, dev, req); in prq_event_thread()
255 trace_prq_report(iommu, dev, req->qw_0, req->qw_1, in prq_event_thread()
257 iommu->prq_seq_number++); in prq_event_thread()
258 mutex_unlock(&iommu->iopf_lock); in prq_event_thread()
263 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
269 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
270 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", in prq_event_thread()
271 iommu->name); in prq_event_thread()
272 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
273 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
275 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
276 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
277 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared", in prq_event_thread()
278 iommu->name); in prq_event_thread()
282 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
283 complete(&iommu->prq_complete); in prq_event_thread()
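prq_event_thread() above is the threaded IRQ handler: it acknowledges the pending bit in DMAR_PRS_REG, walks every descriptor between head and tail, reports valid requests to the iopf layer, writes the new head back to DMAR_PQH_REG, recovers from overflow by discarding partially collected fault groups and clearing DMA_PRS_PRO, and finally wakes anyone sleeping on prq_complete. The model below covers only the consume-and-publish part of that loop; the ring size, slot size and every name in it are assumptions made so the example compiles on its own.

#include <stddef.h>
#include <stdint.h>

#define SLOT_SIZE  32u			/* one page request descriptor (assumed) */
#define RING_BYTES 4096u		/* hypothetical ring size */

struct prq_model {
	uint8_t ring[RING_BYTES];
	size_t  head;			/* consumer offset, mirrors DMAR_PQH_REG */
	size_t  tail;			/* producer offset, mirrors DMAR_PQT_REG */
};

static void report_fault(const uint8_t *req)
{
	(void)req;			/* stand-in for the validity checks and reporting */
}

static void prq_consume(struct prq_model *q)
{
	size_t head = q->head;
	size_t tail = q->tail;		/* snapshot; hardware keeps producing behind it */

	while (head != tail) {
		report_fault(&q->ring[head]);
		head = (head + SLOT_SIZE) % RING_BYTES;
	}
	q->head = tail;			/* publish progress so the slots can be reused */
}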
288 int intel_iommu_enable_prq(struct intel_iommu *iommu) in intel_iommu_enable_prq() argument
293 iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); in intel_iommu_enable_prq()
294 if (!iommu->prq) { in intel_iommu_enable_prq()
295 pr_warn("IOMMU: %s: Failed to allocate page request queue\n", in intel_iommu_enable_prq()
296 iommu->name); in intel_iommu_enable_prq()
300 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_iommu_enable_prq()
302 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", in intel_iommu_enable_prq()
303 iommu->name); in intel_iommu_enable_prq()
307 iommu->pr_irq = irq; in intel_iommu_enable_prq()
309 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_iommu_enable_prq()
310 "dmar%d-iopfq", iommu->seq_id); in intel_iommu_enable_prq()
311 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_iommu_enable_prq()
313 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_iommu_enable_prq()
317 iommu->iopf_queue = iopfq; in intel_iommu_enable_prq()
319 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_iommu_enable_prq()
322 iommu->prq_name, iommu); in intel_iommu_enable_prq()
324 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", in intel_iommu_enable_prq()
325 iommu->name); in intel_iommu_enable_prq()
328 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_iommu_enable_prq()
329 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_iommu_enable_prq()
330 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_iommu_enable_prq()
332 init_completion(&iommu->prq_complete); in intel_iommu_enable_prq()
337 iopf_queue_free(iommu->iopf_queue); in intel_iommu_enable_prq()
338 iommu->iopf_queue = NULL; in intel_iommu_enable_prq()
341 iommu->pr_irq = 0; in intel_iommu_enable_prq()
343 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_iommu_enable_prq()
344 iommu->prq = NULL; in intel_iommu_enable_prq()
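intel_iommu_enable_prq() above sets the path up in stages: queue pages, a dedicated hardware interrupt, an iopf queue, the threaded IRQ handler, and finally the PQH/PQT/PQA registers, unwinding in reverse order when a stage fails. The sketch below reproduces only that allocate-then-unwind shape; every helper in it is a stub invented for the example, not a kernel function.

#include <stddef.h>

static void *alloc_queue(void)         { return (void *)1; }	/* PRQ_ORDER pages  */
static int   alloc_irq(void)           { return 1; }		/* hardware vector  */
static void *alloc_iopf_queue(void)    { return (void *)1; }	/* sw fault queue   */
static int   request_irq_stub(int irq) { (void)irq; return 0; }
static void  free_iopf_queue(void *q)  { (void)q; }
static void  free_irq_stub(int irq)    { (void)irq; }
static void  free_queue(void *q)       { (void)q; }

static int enable_prq_model(void)
{
	void *prq, *iopfq;
	int irq, ret;

	prq = alloc_queue();
	if (!prq)
		return -1;

	irq = alloc_irq();
	if (irq <= 0) {
		ret = -1;
		goto free_prq;
	}

	iopfq = alloc_iopf_queue();
	if (!iopfq) {
		ret = -1;
		goto free_hwirq;
	}

	ret = request_irq_stub(irq);	/* threaded handler = prq_event_thread */
	if (ret)
		goto free_iopfq;

	/* last step in the real code: program PQH/PQT/PQA and init prq_complete */
	return 0;

free_iopfq:
	free_iopf_queue(iopfq);
free_hwirq:
	free_irq_stub(irq);
free_prq:
	free_queue(prq);
	return ret;
}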
349 int intel_iommu_finish_prq(struct intel_iommu *iommu) in intel_iommu_finish_prq() argument
351 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_iommu_finish_prq()
352 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_iommu_finish_prq()
353 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_iommu_finish_prq()
355 if (iommu->pr_irq) { in intel_iommu_finish_prq()
356 free_irq(iommu->pr_irq, iommu); in intel_iommu_finish_prq()
357 dmar_free_hwirq(iommu->pr_irq); in intel_iommu_finish_prq()
358 iommu->pr_irq = 0; in intel_iommu_finish_prq()
361 if (iommu->iopf_queue) { in intel_iommu_finish_prq()
362 iopf_queue_free(iommu->iopf_queue); in intel_iommu_finish_prq()
363 iommu->iopf_queue = NULL; in intel_iommu_finish_prq()
366 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_iommu_finish_prq()
367 iommu->prq = NULL; in intel_iommu_finish_prq()
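intel_iommu_finish_prq() above releases the same resources. The order is suggestive: the queue registers are zeroed before anything is freed, presumably so the hardware can no longer reference the ring once its memory is released. A stub-only sketch of that ordering:

static void quiesce_hw(void)         { /* zero PQH/PQT/PQA           */ }
static void release_irq(void)        { /* free_irq + dmar_free_hwirq */ }
static void release_iopf_queue(void) { /* iopf_queue_free            */ }
static void release_pages(void)      { /* iommu_free_pages           */ }

static void finish_prq_model(void)
{
	quiesce_hw();		/* 1. hardware stops using the ring      */
	release_irq();		/* 2. no further handler invocations     */
	release_iopf_queue();	/* 3. drop the software fault queue      */
	release_pages();	/* 4. only now is the ring memory freed  */
}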
376 struct intel_iommu *iommu = info->iommu; in intel_iommu_page_response() local
397 qi_submit_sync(iommu, &desc, 1, 0); in intel_iommu_page_response()
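intel_iommu_page_response() above answers a fault that was reported earlier: the response descriptor carries the original requester ID, PASID, page request group index and a response code, and is submitted with qi_submit_sync(), the same path handle_bad_prq_event() uses for malformed requests. The sketch below only models packing such a response; the struct and field names are invented and do not reflect the QI_PGRP_* bit encoding.

#include <stdint.h>

enum model_resp_code { RESP_SUCCESS, RESP_INVALID, RESP_FAILURE };

struct model_page_resp {
	uint16_t rid;			/* requester that faulted            */
	uint32_t pasid;			/* address space the fault was in    */
	uint16_t grpid;			/* page request group being answered */
	enum model_resp_code code;	/* retry / no-retry decision         */
};

/* Build the response that would be queued on the invalidation queue. */
static struct model_page_resp make_response(uint16_t rid, uint32_t pasid,
					    uint16_t grpid, int handled)
{
	struct model_page_resp r = {
		.rid   = rid,
		.pasid = pasid,
		.grpid = grpid,
		.code  = handled ? RESP_SUCCESS : RESP_INVALID,
	};
	return r;
}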