Lines matching full:iommu (free-text search; the matches below are from drivers/iommu/intel/cache.c, the Intel VT-d cache-tag code)

13 #include <linux/iommu.h>
18 #include "iommu.h"
24 struct intel_iommu *iommu, struct device *dev, in cache_tage_match() argument
34 return tag->iommu == iommu; in cache_tage_match()
48 struct intel_iommu *iommu = info->iommu; in cache_tag_assign() local
58 tag->iommu = iommu; in cache_tag_assign()
66 tag->dev = iommu->iommu.dev; in cache_tag_assign()
71 if (cache_tage_match(temp, did, iommu, dev, pasid, type)) { in cache_tag_assign()
78 if (temp->iommu == iommu) in cache_tag_assign()
82 * Link cache tags of same iommu unit together, so corresponding in cache_tag_assign()
83 * flush ops can be batched for iommu unit. in cache_tag_assign()
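
The matches above come from cache_tag_assign() and its lookup helper cache_tage_match() (the "tage" spelling is the kernel's own identifier). A cache tag names one cached translation context (domain ID, device, PASID, tag type) on one IOMMU unit, and tags of the same unit are kept adjacent on the domain's list so their invalidations can later be batched per unit, as the quoted comment says. Below is a minimal standalone sketch of the matching idea with simplified types; the real predicate is looser for IOTLB-type tags (it does not compare the device), so this is an illustration, not the kernel logic verbatim:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint16_t u16;
    typedef uint32_t ioasid_t;

    struct intel_iommu;                    /* opaque for this sketch */
    struct device;

    enum cache_tag_type { CACHE_TAG_IOTLB, CACHE_TAG_DEVTLB };  /* assumed subset */

    struct cache_tag {                     /* simplified; the real struct also
                                            * carries list linkage and a user count */
        enum cache_tag_type type;
        struct intel_iommu *iommu;         /* owning IOMMU unit */
        struct device *dev;
        u16 domain_id;
        ioasid_t pasid;
    };

    /* Two lookups name the same tag only when every identifying field,
     * including the IOMMU unit itself, agrees. */
    static bool tag_matches(const struct cache_tag *tag, u16 domain_id,
                            struct intel_iommu *iommu, struct device *dev,
                            ioasid_t pasid, enum cache_tag_type type)
    {
        return tag->type == type && tag->domain_id == domain_id &&
               tag->pasid == pasid && tag->dev == dev &&
               tag->iommu == iommu;
    }
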
99 struct intel_iommu *iommu = info->iommu; in cache_tag_unassign() local
105 if (cache_tage_match(tag, did, iommu, dev, pasid, type)) { in cache_tag_unassign()
203 struct intel_iommu *iommu = info->iommu; in domain_get_id_for_dev() local
212 return domain_id_iommu(domain, iommu); in domain_get_id_for_dev()
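
domain_get_id_for_dev() resolves the ID through domain_id_iommu(domain, iommu) because domain IDs are allocated per IOMMU unit: the same dmar_domain can be known by a different ID on each unit it is attached to. A hypothetical, simplified shape of that per-unit lookup, reusing the u16 typedef from the sketch above (the real helper reads the ID out of the domain's per-IOMMU attachment data):

    /* ids_by_seq and seq_id are stand-ins for the domain's per-unit
     * bookkeeping; only the indexing idea is taken from the driver. */
    static inline u16 domain_id_for_unit(const u16 *ids_by_seq,
                                         unsigned int seq_id)
    {
        return ids_by_seq[seq_id];
    }
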
294 static void qi_batch_flush_descs(struct intel_iommu *iommu, struct qi_batch *batch) in qi_batch_flush_descs() argument
296 if (!iommu || !batch->index) in qi_batch_flush_descs()
299 qi_submit_sync(iommu, batch->descs, batch->index, 0); in qi_batch_flush_descs()
305 static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch) in qi_batch_increment_index() argument
308 qi_batch_flush_descs(iommu, batch); in qi_batch_increment_index()
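
qi_batch_flush_descs() submits whatever has accumulated, guarding against a NULL unit or an empty batch, then resets the buffer; qi_batch_increment_index() advances the write index and triggers that flush once the fixed-size descriptor array fills. The following is a self-contained model of the pair; QI_MAX_BATCHED_DESC_COUNT, the descriptor layout, and submit_sync() are simplified stand-ins for the kernel's versions:

    #include <stdio.h>
    #include <string.h>

    #define QI_MAX_BATCHED_DESC_COUNT 16           /* assumed value for this sketch */

    struct intel_iommu { const char *name; };      /* drastically simplified */
    struct qi_desc { unsigned long long qw[4]; };  /* real desc: qw0..qw3 */

    struct qi_batch {
        struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
        unsigned int index;                        /* next free slot */
    };

    /* Stand-in for qi_submit_sync(), which hands the descriptors to the
     * hardware invalidation queue and waits for completion. */
    static void submit_sync(struct intel_iommu *iommu,
                            struct qi_desc *descs, unsigned int count)
    {
        (void)descs;
        printf("%s: submitting %u descriptor(s)\n", iommu->name, count);
    }

    static void batch_flush(struct intel_iommu *iommu, struct qi_batch *batch)
    {
        if (!iommu || !batch->index)               /* nothing batched yet */
            return;
        submit_sync(iommu, batch->descs, batch->index);
        memset(batch, 0, sizeof(*batch));          /* reset index, clear buffer */
    }

    static void batch_increment(struct intel_iommu *iommu, struct qi_batch *batch)
    {
        /* Submit automatically once the fixed-size array is full. */
        if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)
            batch_flush(iommu, batch);
    }

    int main(void)
    {
        struct intel_iommu unit = { "iommu0" };
        struct qi_batch batch;

        memset(&batch, 0, sizeof(batch));
        for (int i = 0; i < 20; i++)               /* wraps once at 16 */
            batch_increment(&unit, &batch);
        batch_flush(&unit, &batch);                /* drain the remaining 4 */
        return 0;
    }
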
311 static void qi_batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_batch_add_iotlb() argument
315 qi_desc_iotlb(iommu, did, addr, size_order, type, &batch->descs[batch->index]); in qi_batch_add_iotlb()
316 qi_batch_increment_index(iommu, batch); in qi_batch_add_iotlb()
319 static void qi_batch_add_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_batch_add_dev_iotlb() argument
327 if (!(iommu->gcmd & DMA_GCMD_TE)) in qi_batch_add_dev_iotlb()
331 qi_batch_increment_index(iommu, batch); in qi_batch_add_dev_iotlb()
334 static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, in qi_batch_add_piotlb() argument
347 qi_batch_increment_index(iommu, batch); in qi_batch_add_piotlb()
350 static void qi_batch_add_pasid_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, in qi_batch_add_pasid_dev_iotlb() argument
359 if (!(iommu->gcmd & DMA_GCMD_TE)) in qi_batch_add_pasid_dev_iotlb()
364 qi_batch_increment_index(iommu, batch); in qi_batch_add_pasid_dev_iotlb()
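
All four qi_batch_add_*() helpers share one shape: encode a descriptor into the current slot, batch->descs[batch->index], then call qi_batch_increment_index(), which flushes on wrap. The two device-TLB variants first bail out while translation is disabled (iommu->gcmd with DMA_GCMD_TE clear), since the VT-d spec recommends not submitting device-TLB invalidations while the remapping hardware is off. A sketch of the shared pattern, reusing the batch types from the sketch above; encode_iotlb_desc() is a hypothetical stand-in for the qi_desc_iotlb() encoder seen in the matches, and its bit packing is illustrative only:

    typedef unsigned long long u64;

    /* Hypothetical encoder; the real qi_desc_iotlb() packs did, addr,
     * size_order and type into the descriptor's quadwords per the spec. */
    static void encode_iotlb_desc(struct intel_iommu *iommu, u16 did, u64 addr,
                                  unsigned int size_order, u64 type,
                                  struct qi_desc *desc)
    {
        (void)iommu;
        desc->qw[0] = ((u64)did << 16) | type;     /* illustrative packing */
        desc->qw[1] = addr | size_order;
    }

    static void batch_add_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                                unsigned int size_order, u64 type,
                                struct qi_batch *batch)
    {
        encode_iotlb_desc(iommu, did, addr, size_order, type,
                          &batch->descs[batch->index]);
        batch_increment(iommu, batch);             /* may flush on wrap */
    }
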
371 struct intel_iommu *iommu = tag->iommu; in cache_tag_flush_iotlb() local
375 qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr, in cache_tag_flush_iotlb()
384 if (!cap_pgsel_inv(iommu->cap) || in cache_tag_flush_iotlb()
385 mask > cap_max_amask_val(iommu->cap) || pages == -1) { in cache_tag_flush_iotlb()
392 if (ecap_qis(iommu->ecap)) in cache_tag_flush_iotlb()
393 qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type, in cache_tag_flush_iotlb()
396 __iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type); in cache_tag_flush_iotlb()
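
Read together, the cache_tag_flush_iotlb() matches give the flush decision below (a reconstruction from the fragments plus VT-d capability semantics, not verbatim kernel code). First-level, i.e. PASID-tagged, page tables take the PASID-based IOTLB path; otherwise the flush widens from page-selective to domain-selective whenever page-selective invalidation is unsupported (cap_pgsel_inv clear), the mask exceeds cap_max_amask_val, or the caller asked for everything (pages == -1); the descriptor then goes through the batch when queued invalidation is available (ecap_qis) and through the register-based __iommu_flush_iotlb() otherwise:

    /* Kernel-context sketch: tag, domain, addr, pages, mask and ih are
     * the function's locals/arguments as in the driver. */
    u64 type = DMA_TLB_PSI_FLUSH;                  /* page-selective by default */

    if (domain->use_first_level) {
        qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
                            pages, ih, domain->qi_batch);
        return;
    }

    if (!cap_pgsel_inv(iommu->cap) ||
        mask > cap_max_amask_val(iommu->cap) || pages == -1) {
        /* Range not expressible page-selectively: flush the domain. */
        addr = 0;
        mask = 0;
        ih = 0;
        type = DMA_TLB_DSI_FLUSH;
    }

    if (ecap_qis(iommu->ecap))                     /* queued invalidation */
        qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
                           domain->qi_batch);
    else                                           /* register-based fallback */
        __iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
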
402 struct intel_iommu *iommu = tag->iommu; in cache_tag_flush_devtlb_psi() local
410 qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, in cache_tag_flush_devtlb_psi()
413 qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, in cache_tag_flush_devtlb_psi()
418 qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid, in cache_tag_flush_devtlb_psi()
421 qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid, in cache_tag_flush_devtlb_psi()
428 struct intel_iommu *iommu = tag->iommu; in cache_tag_flush_devtlb_all() local
435 qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, in cache_tag_flush_devtlb_all()
438 qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, in cache_tag_flush_devtlb_all()
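
cache_tag_flush_devtlb_psi() picks plain or PASID-qualified device-TLB invalidation depending on the tag's PASID, and cache_tag_flush_devtlb_all() repeats the plain form with a wildcard address (the 0 argument above). Each call appears twice in the matches; in the mainline driver that corresponds to devices flagged for an extra invalidation pass (the dtlb_extra_inval quirk), which is an inference from the driver, not visible in the fragments themselves. Assumed shape of the PSI path (kernel-context; sid is the source ID derived from the device's bus/devfn):

    if (tag->pasid == IOMMU_NO_PASID) {
        qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
                               addr, mask, domain->qi_batch);
        if (info->dtlb_extra_inval)                /* quirk: invalidate twice */
            qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
                                   addr, mask, domain->qi_batch);
    } else {
        qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
                                     info->ats_qdep, addr, mask,
                                     domain->qi_batch);
        if (info->dtlb_extra_inval)
            qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
                                         info->ats_qdep, addr, mask,
                                         domain->qi_batch);
    }
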
449 struct intel_iommu *iommu = NULL; in cache_tag_flush_range() local
458 if (iommu && iommu != tag->iommu) in cache_tag_flush_range()
459 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
460 iommu = tag->iommu; in cache_tag_flush_range()
485 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
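
cache_tag_flush_range() and, below it, cache_tag_flush_all() and cache_tag_flush_range_np() all walk the domain's tag list with the same idiom: since cache_tag_assign() keeps tags grouped by unit, a change of tag->iommu marks the end of one unit's run, so the pending batch is submitted before descriptors for the next unit are queued, with one last submit after the loop. Kernel-context sketch of the idiom (locking and per-tag descriptor building elided):

    struct intel_iommu *iommu = NULL;
    struct cache_tag *tag;

    list_for_each_entry(tag, &domain->cache_tags, node) {
        /* Crossing into the next IOMMU unit: submit what was
         * batched for the previous one. */
        if (iommu && iommu != tag->iommu)
            qi_batch_flush_descs(iommu, domain->qi_batch);
        iommu = tag->iommu;

        /* ... queue this tag's invalidation descriptors ... */
    }

    /* Drain whatever is still pending for the last unit walked. */
    qi_batch_flush_descs(iommu, domain->qi_batch);
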
495 struct intel_iommu *iommu = NULL; in cache_tag_flush_all() local
501 if (iommu && iommu != tag->iommu) in cache_tag_flush_all()
502 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_all()
503 iommu = tag->iommu; in cache_tag_flush_all()
518 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_all()
536 struct intel_iommu *iommu = NULL; in cache_tag_flush_range_np() local
545 if (iommu && iommu != tag->iommu) in cache_tag_flush_range_np()
546 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
547 iommu = tag->iommu; in cache_tag_flush_range_np()
549 if (!cap_caching_mode(iommu->cap) || domain->use_first_level) { in cache_tag_flush_range_np()
550 iommu_flush_write_buffer(iommu); in cache_tag_flush_range_np()
560 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
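
cache_tag_flush_range_np() covers non-present-to-present transitions. Without caching mode (cap_caching_mode clear) the hardware never caches non-present entries, and the driver treats first-level usage the same way, so making the new entries visible only needs a write-buffer flush; real invalidation descriptors are reserved for caching-mode hardware (typically virtualized) walking second-level tables. The matched conditional with that reasoning spelled out, as a sketch of the loop body inside the same per-unit walk as above:

    if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
        /* Non-present entries were never cached: flushing the
         * write buffer is enough for the new mappings. */
        iommu_flush_write_buffer(iommu);
        continue;
    }
    /* Caching-mode hardware may cache non-present entries, so fall
     * through to queueing a real IOTLB invalidation for this tag. */
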