Lines Matching full:iommu
27 #include "iommu.h"
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
119 * Looks up an IOMMU-probed device using its source ID.
125 * released by the iommu subsystem after being returned. The caller
129 struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid) in device_rbtree_find() argument
135 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
136 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key); in device_rbtree_find()
139 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
144 static int device_rbtree_insert(struct intel_iommu *iommu, in device_rbtree_insert() argument
150 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
151 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp); in device_rbtree_insert()
152 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
161 struct intel_iommu *iommu = info->iommu; in device_rbtree_remove() local
164 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
165 rb_erase(&info->node, &iommu->device_rbtree); in device_rbtree_remove()
166 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
190 struct intel_iommu *iommu; /* the corresponding iommu */ member
220 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
222 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
225 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
227 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
230 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
234 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
236 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
247 pr_info("IOMMU enabled\n"); in intel_iommu_setup()
251 pr_info("IOMMU disabled\n"); in intel_iommu_setup()
256 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); in intel_iommu_setup()
259 pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n"); in intel_iommu_setup()
271 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
294 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
298 static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) in __iommu_calculate_sagaw() argument
302 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
303 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
306 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
310 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
316 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
321 sagaw = __iommu_calculate_sagaw(iommu); in __iommu_calculate_agaw()
331 * Calculate max SAGAW for each iommu.
333 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
335 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
339 * calculate agaw for each iommu.
343 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
345 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
348 static bool iommu_paging_structure_coherency(struct intel_iommu *iommu) in iommu_paging_structure_coherency() argument
350 return sm_supported(iommu) ? in iommu_paging_structure_coherency()
351 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
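
The __iommu_calculate_sagaw()/__iommu_calculate_agaw() hits above reduce the hardware-reported SAGAW bitmap to a single adjusted guest address width (AGAW). Below is a minimal standalone model of that selection, assuming the usual VT-d encoding where SAGAW bit N corresponds to a width of 30 + 9*N bits (bit 1 = 39-bit/3-level, bit 2 = 48-bit/4-level, bit 3 = 57-bit/5-level); the sample SAGAW value and helper names are illustrative, not the kernel's.

#include <stdio.h>

/* Bit N of SAGAW <=> an adjusted guest address width of 30 + 9*N bits. */
static int width_to_agaw(int width) { return (width - 30 + 8) / 9; }
static int agaw_to_width(int agaw)  { return 30 + 9 * agaw; }

/* Highest supported AGAW that still fits within max_gaw, or -1 if none. */
static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1UL << agaw))
			return agaw;
	return -1;
}

int main(void)
{
	unsigned long sagaw = (1UL << 2) | (1UL << 3);	/* e.g. 48- and 57-bit support */

	printf("gaw 48 -> agaw %d (%d bits)\n",
	       calculate_agaw(sagaw, 48), agaw_to_width(calculate_agaw(sagaw, 48)));
	printf("gaw 57 -> agaw %d (%d bits)\n",
	       calculate_agaw(sagaw, 57), agaw_to_width(calculate_agaw(sagaw, 57)));
	return 0;
}
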
371 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, in iommu_context_addr() argument
374 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
382 if (!alloc && context_copied(iommu, bus, devfn)) in iommu_context_addr()
386 if (sm_supported(iommu)) { in iommu_context_addr()
400 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_context_addr()
404 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
407 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
445 /* We know that this device on this chipset has its own IOMMU. in quirk_ioat_snb_local_iommu()
446 * If we find it under a different IOMMU, then the BIOS is lying in quirk_ioat_snb_local_iommu()
447 * to us. Hope that the IOMMU for this device is actually in quirk_ioat_snb_local_iommu()
458 /* we know that this iommu should be at offset 0xa000 from vtbar */ in quirk_ioat_snb_local_iommu()
469 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) in iommu_is_dummy() argument
471 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
490 struct intel_iommu *iommu; in device_lookup_iommu() local
504 * the PF instead to find the IOMMU. */ in device_lookup_iommu()
512 for_each_iommu(iommu, drhd) { in device_lookup_iommu()
520 * which we used for the IOMMU lookup. Strictly speaking in device_lookup_iommu()
546 iommu = NULL; in device_lookup_iommu()
548 if (iommu_is_dummy(iommu, dev)) in device_lookup_iommu()
549 iommu = NULL; in device_lookup_iommu()
553 return iommu; in device_lookup_iommu()
563 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
568 if (!iommu->root_entry) in free_context_table()
572 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
576 if (!sm_supported(iommu)) in free_context_table()
579 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
584 iommu_free_page(iommu->root_entry); in free_context_table()
585 iommu->root_entry = NULL; in free_context_table()
589 static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, in pgtable_walk() argument
614 void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, in dmar_fault_dump_ptes() argument
626 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
629 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
633 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
635 if (sm_supported(iommu)) in dmar_fault_dump_ptes()
642 ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); in dmar_fault_dump_ptes()
652 if (!sm_supported(iommu)) { in dmar_fault_dump_ptes()
703 pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level); in dmar_fault_dump_ptes()
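
dmar_fault_dump_ptes() ends by calling pgtable_walk(), which descends the I/O page table for the faulting IOVA one level at a time. The sketch below models only the index arithmetic behind that walk, assuming the VT-d layout of 4 KiB pages with 9 index bits per level; the IOVA value is made up, and the helper mirrors (but is not) the driver's pfn_level_offset().

#include <stdio.h>

#define VTD_PAGE_SHIFT	12
#define LEVEL_STRIDE	9
#define LEVEL_MASK	((1UL << LEVEL_STRIDE) - 1)

/* Index into the page-table page at 'level' for a given page frame number. */
static unsigned int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> ((level - 1) * LEVEL_STRIDE)) & LEVEL_MASK;
}

int main(void)
{
	unsigned long long iova = 0x12345678000ULL;	/* arbitrary example IOVA */
	unsigned long pfn = iova >> VTD_PAGE_SHIFT;
	int level;

	for (level = 4; level >= 1; level--)		/* 4-level (48-bit) walk */
		printf("level %d index: %u\n", level, pfn_level_offset(pfn, level));
	return 0;
}
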
716 /* Address beyond IOMMU's addressing capabilities. */ in pfn_to_dma_pte()
959 /* We can't just free the pages because the IOMMU may still be walking
981 /* iommu handling */
982 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
986 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
989 iommu->name); in iommu_alloc_root_entry()
993 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
994 iommu->root_entry = root; in iommu_alloc_root_entry()
999 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1005 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1006 if (sm_supported(iommu)) in iommu_set_root_entry()
1009 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1010 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1012 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1015 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1018 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1024 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1027 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1028 if (sm_supported(iommu)) in iommu_set_root_entry()
1029 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in iommu_set_root_entry()
1030 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1033 void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1038 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1041 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1042 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1045 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1048 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1052 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1072 iommu->name, type); in __iommu_flush_context()
1077 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1078 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1081 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1084 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1087 void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in __iommu_flush_iotlb() argument
1090 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1109 iommu->name, type); in __iommu_flush_iotlb()
1113 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1116 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1119 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1120 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1123 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1126 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1139 struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_lookup_dev_info() argument
1146 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1208 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1213 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1216 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1217 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1219 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1222 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1225 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1228 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1233 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1234 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1235 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1238 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1241 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1244 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1249 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1250 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1253 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1254 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1255 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1258 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1261 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
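
iommu_set_root_entry(), iommu_flush_write_buffer(), iommu_enable_translation() and iommu_disable_translation() all share one pattern: take register_lock with interrupts off, write a command bit into DMAR_GCMD_REG, then poll DMAR_GSTS_REG via IOMMU_WAIT_OP() until the matching status bit reflects the change. The following is a rough user-space sketch of that handshake; the MMIO accessors and the instantly-latching fake status register are stand-ins, and only the TE bit position follows the VT-d register layout.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for readl()/writel() on iommu->reg + DMAR_GCMD_REG / DMAR_GSTS_REG. */
static uint32_t fake_gcmd, fake_gsts;

static void write_gcmd(uint32_t val)
{
	fake_gcmd = val;
	fake_gsts = val;	/* pretend hardware latches the command immediately */
}

static uint32_t read_gsts(void) { return fake_gsts; }

/* Model of the IOMMU_WAIT_OP pattern: poll a status register until the
 * condition holds (the real macro also enforces a timeout). */
static void wait_for_status(uint32_t mask, int set)
{
	uint32_t sts;

	do {
		sts = read_gsts();
	} while (set ? !(sts & mask) : (sts & mask));
}

int main(void)
{
	const uint32_t DMA_GCMD_TE = 1u << 31;	/* translation-enable command/status bit */
	uint32_t gcmd = 0;

	gcmd |= DMA_GCMD_TE;			/* enable translation */
	write_gcmd(gcmd);
	wait_for_status(DMA_GCMD_TE, 1);
	puts("translation enabled");

	gcmd &= ~DMA_GCMD_TE;			/* disable translation */
	write_gcmd(gcmd);
	wait_for_status(DMA_GCMD_TE, 0);
	puts("translation disabled");
	return 0;
}
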
1264 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1268 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1270 iommu->name, ndomains); in iommu_init_domains()
1272 spin_lock_init(&iommu->lock); in iommu_init_domains()
1274 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1275 if (!iommu->domain_ids) in iommu_init_domains()
1284 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1294 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1299 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1301 if (!iommu->domain_ids) in disable_dmar_iommu()
1305 * All iommu domains must have been detached from the devices, in disable_dmar_iommu()
1308 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1312 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1313 iommu_disable_translation(iommu); in disable_dmar_iommu()
1316 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1318 if (iommu->domain_ids) { in free_dmar_iommu()
1319 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1320 iommu->domain_ids = NULL; in free_dmar_iommu()
1323 if (iommu->copied_tables) { in free_dmar_iommu()
1324 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1325 iommu->copied_tables = NULL; in free_dmar_iommu()
1329 free_context_table(iommu); in free_dmar_iommu()
1331 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1332 intel_iommu_finish_prq(iommu); in free_dmar_iommu()
1339 static bool first_level_by_default(struct intel_iommu *iommu) in first_level_by_default() argument
1342 if (!sm_supported(iommu)) in first_level_by_default()
1346 if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) in first_level_by_default()
1347 return ecap_flts(iommu->ecap); in first_level_by_default()
1352 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_attach_iommu() argument
1365 spin_lock(&iommu->lock); in domain_attach_iommu()
1366 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1369 spin_unlock(&iommu->lock); in domain_attach_iommu()
1374 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1375 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1377 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1381 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1384 info->iommu = iommu; in domain_attach_iommu()
1385 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1392 spin_unlock(&iommu->lock); in domain_attach_iommu()
1396 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1398 spin_unlock(&iommu->lock); in domain_attach_iommu()
1403 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_detach_iommu() argument
1410 spin_lock(&iommu->lock); in domain_detach_iommu()
1411 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1413 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1414 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1418 spin_unlock(&iommu->lock); in domain_detach_iommu()
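
domain_attach_iommu()/domain_detach_iommu() above hand out per-IOMMU domain IDs from the domain_ids bitmap that iommu_init_domains() sized with cap_ndoms() and pre-seeded (ID 0 and FLPT_DEFAULT_DID are reserved up front, as the set_bit() calls show). Here is a small standalone model of that bookkeeping; the bitmap size, the reserved IDs and the byte-per-bit representation are illustrative rather than the kernel's bitmap API.

#include <stdio.h>
#include <string.h>

#define NDOMAINS	256			/* stand-in for cap_ndoms(iommu->cap) */

static unsigned char domain_ids[NDOMAINS];	/* 1 = in use */

static void reserve_id(int id) { domain_ids[id] = 1; }

/* find_first_zero_bit() + set_bit() equivalent, normally done under iommu->lock. */
static int alloc_domain_id(void)
{
	int id;

	for (id = 0; id < NDOMAINS; id++) {
		if (!domain_ids[id]) {
			domain_ids[id] = 1;
			return id;
		}
	}
	return -1;				/* "No free domain ids" */
}

static void free_domain_id(int id) { domain_ids[id] = 0; }

int main(void)
{
	memset(domain_ids, 0, sizeof(domain_ids));
	reserve_id(0);				/* ID 0 is never handed out */
	reserve_id(1);				/* e.g. FLPT_DEFAULT_DID */

	int a = alloc_domain_id();
	int b = alloc_domain_id();
	printf("allocated DIDs %d and %d\n", a, b);
	free_domain_id(a);
	printf("re-allocated DID %d\n", alloc_domain_id());
	return 0;
}
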
1446 static void copied_context_tear_down(struct intel_iommu *iommu, in copied_context_tear_down() argument
1452 if (!context_copied(iommu, bus, devfn)) in copied_context_tear_down()
1455 assert_spin_locked(&iommu->lock); in copied_context_tear_down()
1460 if (did_old < cap_ndoms(iommu->cap)) { in copied_context_tear_down()
1461 iommu->flush.flush_context(iommu, did_old, in copied_context_tear_down()
1465 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1469 clear_context_copied(iommu, bus, devfn); in copied_context_tear_down()
1478 static void context_present_cache_flush(struct intel_iommu *iommu, u16 did, in context_present_cache_flush() argument
1481 if (cap_caching_mode(iommu->cap)) { in context_present_cache_flush()
1482 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1486 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
1488 iommu_flush_write_buffer(iommu); in context_present_cache_flush()
1493 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1497 domain_lookup_dev_info(domain, iommu, bus, devfn); in domain_context_mapping_one()
1498 u16 did = domain_id_iommu(domain, iommu); in domain_context_mapping_one()
1507 spin_lock(&iommu->lock); in domain_context_mapping_one()
1509 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1514 if (context_present(context) && !context_copied(iommu, bus, devfn)) in domain_context_mapping_one()
1517 copied_context_tear_down(iommu, context, bus, devfn); in domain_context_mapping_one()
1531 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1533 context_present_cache_flush(iommu, did, bus, devfn); in domain_context_mapping_one()
1537 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1546 struct intel_iommu *iommu = info->iommu; in domain_context_mapping_cb() local
1549 return domain_context_mapping_one(domain, iommu, in domain_context_mapping_cb()
1557 struct intel_iommu *iommu = info->iommu; in domain_context_mapping() local
1561 return domain_context_mapping_one(domain, iommu, bus, devfn); in domain_context_mapping()
1736 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one() local
1740 spin_lock(&iommu->lock); in domain_context_clear_one()
1741 context = iommu_context_addr(iommu, bus, devfn, 0); in domain_context_clear_one()
1743 spin_unlock(&iommu->lock); in domain_context_clear_one()
1749 __iommu_flush_cache(iommu, context, sizeof(*context)); in domain_context_clear_one()
1750 spin_unlock(&iommu->lock); in domain_context_clear_one()
1754 int __domain_setup_first_level(struct intel_iommu *iommu, in __domain_setup_first_level() argument
1760 return intel_pasid_setup_first_level(iommu, dev, pgd, in __domain_setup_first_level()
1762 return intel_pasid_replace_first_level(iommu, dev, pgd, pasid, did, in __domain_setup_first_level()
1763 iommu_domain_did(old, iommu), in __domain_setup_first_level()
1767 static int domain_setup_second_level(struct intel_iommu *iommu, in domain_setup_second_level() argument
1773 return intel_pasid_setup_second_level(iommu, domain, in domain_setup_second_level()
1775 return intel_pasid_replace_second_level(iommu, domain, dev, in domain_setup_second_level()
1776 iommu_domain_did(old, iommu), in domain_setup_second_level()
1780 static int domain_setup_passthrough(struct intel_iommu *iommu, in domain_setup_passthrough() argument
1785 return intel_pasid_setup_pass_through(iommu, dev, pasid); in domain_setup_passthrough()
1786 return intel_pasid_replace_pass_through(iommu, dev, in domain_setup_passthrough()
1787 iommu_domain_did(old, iommu), in domain_setup_passthrough()
1791 static int domain_setup_first_level(struct intel_iommu *iommu, in domain_setup_first_level() argument
1809 return __domain_setup_first_level(iommu, dev, pasid, in domain_setup_first_level()
1810 domain_id_iommu(domain, iommu), in domain_setup_first_level()
1818 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device() local
1822 ret = domain_attach_iommu(domain, iommu); in dmar_domain_attach_device()
1834 if (!sm_supported(iommu)) in dmar_domain_attach_device()
1837 ret = domain_setup_first_level(iommu, domain, dev, in dmar_domain_attach_device()
1840 ret = domain_setup_second_level(iommu, domain, dev, in dmar_domain_attach_device()
1891 struct intel_iommu *iommu = info->iommu; in device_def_domain_type() local
1897 if (!ecap_pass_through(iommu->ecap)) in device_def_domain_type()
1910 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
1913 * Start from a sane iommu hardware state. in intel_iommu_init_qi()
1918 if (!iommu->qi) { in intel_iommu_init_qi()
1922 dmar_fault(-1, iommu); in intel_iommu_init_qi()
1927 dmar_disable_qi(iommu); in intel_iommu_init_qi()
1930 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
1934 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
1935 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
1937 iommu->name); in intel_iommu_init_qi()
1939 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
1940 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
1941 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
1945 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
1967 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
1997 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); in copy_context_table()
2011 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2012 set_bit(did, iommu->domain_ids); in copy_context_table()
2014 set_context_copied(iommu, bus, devfn); in copy_context_table()
2020 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2029 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
2039 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2041 new_ext = !!sm_supported(iommu); in copy_translation_tables()
2052 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2053 if (!iommu->copied_tables) in copy_translation_tables()
2072 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
2076 iommu->name, bus); in copy_translation_tables()
2081 spin_lock(&iommu->lock); in copy_translation_tables()
2090 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2097 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2100 spin_unlock(&iommu->lock); in copy_translation_tables()
2104 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2117 struct intel_iommu *iommu; in init_dmars() local
2120 for_each_iommu(iommu, drhd) { in init_dmars()
2122 iommu_disable_translation(iommu); in init_dmars()
2127 * Find the max pasid size of all IOMMUs in the system. in init_dmars()
2131 if (pasid_supported(iommu)) { in init_dmars()
2132 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2138 intel_iommu_init_qi(iommu); in init_dmars()
2140 ret = iommu_init_domains(iommu); in init_dmars()
2144 init_translation_status(iommu); in init_dmars()
2146 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
2147 iommu_disable_translation(iommu); in init_dmars()
2148 clear_translation_pre_enabled(iommu); in init_dmars()
2150 iommu->name); in init_dmars()
2156 * among all IOMMUs. Need to split it later. in init_dmars()
2158 ret = iommu_alloc_root_entry(iommu); in init_dmars()
2162 if (translation_pre_enabled(iommu)) { in init_dmars()
2165 ret = copy_translation_tables(iommu); in init_dmars()
2168 * We found the IOMMU with translation in init_dmars()
2177 iommu->name); in init_dmars()
2178 iommu_disable_translation(iommu); in init_dmars()
2179 clear_translation_pre_enabled(iommu); in init_dmars()
2182 iommu->name); in init_dmars()
2186 intel_svm_check(iommu); in init_dmars()
2194 for_each_active_iommu(iommu, drhd) { in init_dmars()
2195 iommu_flush_write_buffer(iommu); in init_dmars()
2196 iommu_set_root_entry(iommu); in init_dmars()
2208 for_each_iommu(iommu, drhd) { in init_dmars()
2215 iommu_disable_protect_mem_regions(iommu); in init_dmars()
2219 iommu_flush_write_buffer(iommu); in init_dmars()
2221 if (ecap_prs(iommu->ecap)) { in init_dmars()
2227 ret = intel_iommu_enable_prq(iommu); in init_dmars()
2233 ret = dmar_set_interrupt(iommu); in init_dmars()
2241 for_each_active_iommu(iommu, drhd) { in init_dmars()
2242 disable_dmar_iommu(iommu); in init_dmars()
2243 free_dmar_iommu(iommu); in init_dmars()
2277 /* This IOMMU has *only* gfx devices. Either bypass it or in init_no_remapping_devices()
2289 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
2292 for_each_active_iommu(iommu, drhd) { in init_iommu_hw()
2293 if (iommu->qi) { in init_iommu_hw()
2294 ret = dmar_reenable_qi(iommu); in init_iommu_hw()
2300 for_each_iommu(iommu, drhd) { in init_iommu_hw()
2307 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2311 iommu_flush_write_buffer(iommu); in init_iommu_hw()
2312 iommu_set_root_entry(iommu); in init_iommu_hw()
2313 iommu_enable_translation(iommu); in init_iommu_hw()
2314 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
2323 struct intel_iommu *iommu; in iommu_flush_all() local
2325 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
2326 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2328 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2336 struct intel_iommu *iommu = NULL; in iommu_suspend() local
2341 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
2342 iommu_disable_translation(iommu); in iommu_suspend()
2344 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2346 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2347 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2348 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2349 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2350 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2351 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2352 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2353 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2355 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2363 struct intel_iommu *iommu = NULL; in iommu_resume() local
2368 panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
2370 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); in iommu_resume()
2374 for_each_active_iommu(iommu, drhd) { in iommu_resume()
2376 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2378 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2379 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2380 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2381 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2382 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2383 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2384 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2385 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2387 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
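
Across suspend/resume, iommu_suspend()/iommu_resume() save and restore the fault-event programming (FECTL/FEDATA/FEADDR/FEUADDR) in iommu->iommu_state[], while translation and the root table are re-established separately on resume (the init_iommu_hw() error paths are visible above). Below is a toy sketch of that save/restore, with a plain array standing in for both the MMIO registers and iommu_state[]; the register values are made up.

#include <stdint.h>
#include <stdio.h>

enum { SR_DMAR_FECTL_REG, SR_DMAR_FEDATA_REG, SR_DMAR_FEADDR_REG,
       SR_DMAR_FEUADDR_REG, MAX_SR_DMAR_REGS };

/* Stand-ins for the fault-event MMIO registers and for iommu->iommu_state[]. */
static uint32_t fake_regs[MAX_SR_DMAR_REGS] = { 0x1, 0x30, 0xfee00000, 0x0 };
static uint32_t saved_state[MAX_SR_DMAR_REGS];

static void fault_regs_save(void)
{
	for (int i = 0; i < MAX_SR_DMAR_REGS; i++)
		saved_state[i] = fake_regs[i];	/* readl() in the driver */
}

static void fault_regs_restore(void)
{
	for (int i = 0; i < MAX_SR_DMAR_REGS; i++)
		fake_regs[i] = saved_state[i];	/* writel() in the driver */
}

int main(void)
{
	fault_regs_save();
	fake_regs[SR_DMAR_FEDATA_REG] = 0;	/* pretend the register contents were lost */
	fault_regs_restore();
	printf("FEDATA restored to 0x%x\n", fake_regs[SR_DMAR_FEDATA_REG]);
	return 0;
}
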
2612 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
2618 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
2619 iommu_disable_translation(iommu); in intel_iommu_add()
2621 ret = iommu_init_domains(iommu); in intel_iommu_add()
2623 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
2627 intel_svm_check(iommu); in intel_iommu_add()
2634 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
2638 intel_iommu_init_qi(iommu); in intel_iommu_add()
2639 iommu_flush_write_buffer(iommu); in intel_iommu_add()
2641 if (ecap_prs(iommu->ecap)) { in intel_iommu_add()
2642 ret = intel_iommu_enable_prq(iommu); in intel_iommu_add()
2647 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
2651 iommu_set_root_entry(iommu); in intel_iommu_add()
2652 iommu_enable_translation(iommu); in intel_iommu_add()
2654 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
2658 disable_dmar_iommu(iommu); in intel_iommu_add()
2660 free_dmar_iommu(iommu); in intel_iommu_add()
2667 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
2671 if (iommu == NULL) in dmar_iommu_hotplug()
2677 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
2678 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
2731 static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) in dmar_ats_supported() argument
2746 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
2751 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
2859 struct intel_iommu *iommu = NULL; in intel_disable_iommus() local
2862 for_each_iommu(iommu, drhd) in intel_disable_iommus()
2863 iommu_disable_translation(iommu); in intel_disable_iommus()
2869 struct intel_iommu *iommu = NULL; in intel_iommu_shutdown() local
2879 iommu = drhd->iommu; in intel_iommu_shutdown()
2882 iommu_disable_protect_mem_regions(iommu); in intel_iommu_shutdown()
2885 iommu_disable_translation(iommu); in intel_iommu_shutdown()
2893 return container_of(iommu_dev, struct intel_iommu, iommu); in dev_to_intel_iommu()
2899 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in version_show() local
2900 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
2909 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in address_show() local
2910 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
2917 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in cap_show() local
2918 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
2925 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in ecap_show() local
2926 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
2933 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_supported_show() local
2934 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
2941 struct intel_iommu *iommu = dev_to_intel_iommu(dev); in domains_used_show() local
2943 bitmap_weight(iommu->domain_ids, in domains_used_show()
2944 cap_ndoms(iommu->cap))); in domains_used_show()
2959 .name = "intel-iommu",
2987 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
2990 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3006 struct intel_iommu *iommu __maybe_unused; in probe_acpi_namespace_devices()
3010 for_each_active_iommu(iommu, drhd) { in probe_acpi_namespace_devices()
3045 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3057 struct intel_iommu *iommu; in intel_iommu_init() local
3060 * Intel IOMMU is required for a TXT/tboot launch or platform in intel_iommu_init()
3094 * We exit the function here to ensure IOMMU's remapping and in intel_iommu_init()
3095 * mempool aren't setup, which means that the IOMMU's PMRs in intel_iommu_init()
3102 for_each_iommu(iommu, drhd) in intel_iommu_init()
3103 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3138 for_each_active_iommu(iommu, drhd) { in intel_iommu_init()
3144 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3146 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3147 !first_level_by_default(iommu)) { in intel_iommu_init()
3148 pr_info_once("IOMMU batching disallowed due to virtualization\n"); in intel_iommu_init()
3151 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3153 "%s", iommu->name); in intel_iommu_init()
3155 * The iommu device probe is protected by the iommu_probe_device_lock. in intel_iommu_init()
3160 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3163 iommu_pmu_register(iommu); in intel_iommu_init()
3170 for_each_iommu(iommu, drhd) { in intel_iommu_init()
3171 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3172 iommu_enable_translation(iommu); in intel_iommu_init()
3174 iommu_disable_protect_mem_regions(iommu); in intel_iommu_init()
3199 * NB - intel-iommu lacks any sort of reference counting for the users of
3223 struct intel_iommu *iommu = info->iommu; in device_block_translation() local
3231 if (sm_supported(iommu)) in device_block_translation()
3232 intel_pasid_tear_down_entry(iommu, dev, in device_block_translation()
3245 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3268 static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage) in iommu_superpage_capability() argument
3274 return cap_fl1gp_support(iommu->cap) ? 2 : 1; in iommu_superpage_capability()
3276 return fls(cap_super_page_val(iommu->cap)); in iommu_superpage_capability()
3282 struct intel_iommu *iommu = info->iommu; in paging_domain_alloc() local
3301 addr_width = agaw_to_width(iommu->agaw); in paging_domain_alloc()
3302 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_alloc()
3303 addr_width = cap_mgaw(iommu->cap); in paging_domain_alloc()
3305 domain->agaw = iommu->agaw; in paging_domain_alloc()
3308 /* iommu memory access coherency */ in paging_domain_alloc()
3309 domain->iommu_coherency = iommu_paging_structure_coherency(iommu); in paging_domain_alloc()
3313 domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); in paging_domain_alloc()
3347 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging_flags() local
3355 if (nested_parent && !nested_supported(iommu)) in intel_iommu_domain_alloc_paging_flags()
3357 if (user_data || (dirty_tracking && !ssads_supported(iommu))) in intel_iommu_domain_alloc_paging_flags()
3366 if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) in intel_iommu_domain_alloc_paging_flags()
3370 first_stage = first_level_by_default(iommu); in intel_iommu_domain_alloc_paging_flags()
3411 struct intel_iommu *iommu = info->iommu; in paging_domain_compatible() local
3417 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in paging_domain_compatible()
3420 if (domain->dirty_ops && !ssads_supported(iommu)) in paging_domain_compatible()
3424 iommu_paging_structure_coherency(iommu)) in paging_domain_compatible()
3428 iommu_superpage_capability(iommu, dmar_domain->use_first_level)) in paging_domain_compatible()
3432 (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) in paging_domain_compatible()
3435 /* check if this iommu agaw is sufficient for max mapped address */ in paging_domain_compatible()
3436 addr_width = agaw_to_width(iommu->agaw); in paging_domain_compatible()
3437 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_compatible()
3438 addr_width = cap_mgaw(iommu->cap); in paging_domain_compatible()
3440 if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) in paging_domain_compatible()
3443 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) && in paging_domain_compatible()
3444 context_copied(iommu, info->bus, info->devfn)) in paging_domain_compatible()
3486 pr_err("%s: iommu width (%d) is not " in intel_iommu_map()
3601 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
3625 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
3662 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
3664 return ssads_supported(info->iommu); in intel_iommu_capable()
3674 struct intel_iommu *iommu; in intel_iommu_probe_device() local
3678 iommu = device_lookup_iommu(dev, &bus, &devfn); in intel_iommu_probe_device()
3679 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
3693 info->segment = iommu->segment; in intel_iommu_probe_device()
3697 info->iommu = iommu; in intel_iommu_probe_device()
3699 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
3701 dmar_ats_supported(pdev, iommu)) { in intel_iommu_probe_device()
3706 * For IOMMU that supports device IOTLB throttling in intel_iommu_probe_device()
3708 * of a VF such that IOMMU HW can gauge queue depth in intel_iommu_probe_device()
3712 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
3716 if (sm_supported(iommu)) { in intel_iommu_probe_device()
3717 if (pasid_supported(iommu)) { in intel_iommu_probe_device()
3724 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
3733 ret = device_rbtree_insert(iommu, info); in intel_iommu_probe_device()
3738 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { in intel_iommu_probe_device()
3745 if (!context_copied(iommu, info->bus, info->devfn)) { in intel_iommu_probe_device()
3764 return &iommu->iommu; in intel_iommu_probe_device()
3778 struct intel_iommu *iommu = info->iommu; in intel_iommu_release_device() local
3785 mutex_lock(&iommu->iopf_lock); in intel_iommu_release_device()
3788 mutex_unlock(&iommu->iopf_lock); in intel_iommu_release_device()
3790 if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) && in intel_iommu_release_device()
3791 !context_copied(iommu, info->bus, info->devfn)) in intel_iommu_release_device()
3869 struct intel_iommu *iommu; in intel_iommu_enable_sva() local
3874 iommu = info->iommu; in intel_iommu_enable_sva()
3875 if (!iommu) in intel_iommu_enable_sva()
3878 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
3886 * support PCI/PRI. The IOMMU side has no means to check the in intel_iommu_enable_sva()
3887 * capability of device-specific IOPF. Therefore, IOMMU can only in intel_iommu_enable_sva()
3903 struct intel_iommu *iommu = info->iommu; in context_flip_pri() local
3908 spin_lock(&iommu->lock); in context_flip_pri()
3909 if (context_copied(iommu, bus, devfn)) { in context_flip_pri()
3910 spin_unlock(&iommu->lock); in context_flip_pri()
3914 context = iommu_context_addr(iommu, bus, devfn, false); in context_flip_pri()
3916 spin_unlock(&iommu->lock); in context_flip_pri()
3926 if (!ecap_coherent(iommu->ecap)) in context_flip_pri()
3929 spin_unlock(&iommu->lock); in context_flip_pri()
3938 struct intel_iommu *iommu; in intel_iommu_enable_iopf() local
3947 iommu = info->iommu; in intel_iommu_enable_iopf()
3948 if (!iommu) in intel_iommu_enable_iopf()
3959 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3977 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3985 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf() local
3997 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_iopf()
4047 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4053 * thus not be able to bypass the IOMMU restrictions.
4059 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", in risky_device()
4080 struct intel_iommu *iommu = info->iommu; in domain_remove_dev_pasid() local
4103 domain_detach_iommu(dmar_domain, iommu); in domain_remove_dev_pasid()
4116 intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); in blocking_domain_set_dev_pasid()
4128 struct intel_iommu *iommu = info->iommu; in domain_add_dev_pasid() local
4137 ret = domain_attach_iommu(dmar_domain, iommu); in domain_add_dev_pasid()
4153 domain_detach_iommu(dmar_domain, iommu); in domain_add_dev_pasid()
4165 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid() local
4172 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) in intel_iommu_set_dev_pasid()
4178 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4190 ret = domain_setup_first_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4193 ret = domain_setup_second_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4212 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info() local
4220 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4221 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4237 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4354 struct intel_iommu *iommu = info->iommu; in context_setup_pass_through() local
4357 spin_lock(&iommu->lock); in context_setup_pass_through()
4358 context = iommu_context_addr(iommu, bus, devfn, 1); in context_setup_pass_through()
4360 spin_unlock(&iommu->lock); in context_setup_pass_through()
4364 if (context_present(context) && !context_copied(iommu, bus, devfn)) { in context_setup_pass_through()
4365 spin_unlock(&iommu->lock); in context_setup_pass_through()
4369 copied_context_tear_down(iommu, context, bus, devfn); in context_setup_pass_through()
4377 context_set_address_width(context, iommu->msagaw); in context_setup_pass_through()
4381 if (!ecap_coherent(iommu->ecap)) in context_setup_pass_through()
4383 context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn); in context_setup_pass_through()
4384 spin_unlock(&iommu->lock); in context_setup_pass_through()
4410 struct intel_iommu *iommu = info->iommu; in identity_domain_attach_dev() local
4418 if (sm_supported(iommu)) { in identity_domain_attach_dev()
4419 ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID); in identity_domain_attach_dev()
4434 struct intel_iommu *iommu = info->iommu; in identity_domain_set_dev_pasid() local
4437 if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) in identity_domain_set_dev_pasid()
4440 ret = domain_setup_passthrough(iommu, dev, pasid, old); in identity_domain_set_dev_pasid()
4494 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); in quirk_iommu_igfx()
4575 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); in quirk_calpella_no_shadow_gtt()
4604 pci_info(dev, "Skip IOMMU disabling for graphics\n"); in quirk_igfx_skip_te_disable()
4698 * before unmap/unbind. For #3, iommu driver gets mmu_notifier to
4715 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4718 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4737 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob) in ecmd_submit_sync() argument
4743 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
4746 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
4748 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
4761 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
4762 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
4764 IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq, in ecmd_submit_sync()
4774 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()