Lines matching full:mapping (whole-word identifier search; the hits below all appear to come from the etnaviv DRM driver's MMU code, drivers/gpu/drm/etnaviv/etnaviv_mmu.c)

6 #include <linux/dma-mapping.h>
53 /* unroll mapping in case something went wrong */ in etnaviv_context_map()
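The hit at line 53 is from etnaviv_context_map(), which maps a region page by page and has to tear down the pages it already wrote when a later page fails. A minimal userspace sketch of that unroll pattern, assuming hypothetical map_page()/unmap_page() helpers in place of the real IOMMU ops:

#include <stddef.h>
#include <stdio.h>

#define PGSIZE 4096UL

/* Hypothetical per-page helpers; the real driver goes through its IOMMU ops. */
static int map_page(unsigned long iova) { return iova < 0x10000000UL ? 0 : -1; }
static void unmap_page(unsigned long iova) { printf("unmap 0x%lx\n", iova); }

/* Map [iova, iova + size) one page at a time; on failure, unmap whatever
 * part of the range was already mapped before reporting the error. */
static int map_region(unsigned long iova, size_t size)
{
	unsigned long orig_iova = iova;
	size_t mapped = 0;
	int ret = 0;

	while (mapped < size) {
		ret = map_page(iova);
		if (ret)
			break;
		iova += PGSIZE;
		mapped += PGSIZE;
	}

	/* unroll mapping in case something went wrong */
	if (ret) {
		while (mapped) {
			mapped -= PGSIZE;
			unmap_page(orig_iova + mapped);
		}
	}

	return ret;
}

int main(void)
{
	return map_region(0x100000UL, 4 * PGSIZE) ? 1 : 0;
}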
113 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_remove_mapping() argument
115 struct etnaviv_gem_object *etnaviv_obj = mapping->object; in etnaviv_iommu_remove_mapping()
119 etnaviv_iommu_unmap(context, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
121 drm_mm_remove_node(&mapping->vram_node); in etnaviv_iommu_remove_mapping()
124 void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_reap_mapping() argument
126 struct etnaviv_iommu_context *context = mapping->context; in etnaviv_iommu_reap_mapping()
129 WARN_ON(mapping->use); in etnaviv_iommu_reap_mapping()
131 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_reap_mapping()
132 etnaviv_iommu_context_put(mapping->context); in etnaviv_iommu_reap_mapping()
133 mapping->context = NULL; in etnaviv_iommu_reap_mapping()
134 list_del_init(&mapping->mmu_node); in etnaviv_iommu_reap_mapping()
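Lines 124-134 show the reap path: a mapping may only be reaped once its use count is zero (the WARN_ON at line 129), after which the translation is torn down and the address-space node released (via etnaviv_iommu_remove_mapping(), lines 119-121), the context reference is dropped, mapping->context is cleared and the mapping is unlinked. Clearing the context pointer is what later lets an unmap call detect that the work was already done. A condensed sketch of that teardown order, using simplified stand-in structures rather than the real kernel types:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins; not the real etnaviv structures. */
struct gpu_context { int refcount; };

struct vram_mapping {
	struct gpu_context *context;  /* NULL once the mapping has been reaped */
	int use;                      /* pin count; must be zero before reaping */
	unsigned long iova;
	unsigned long size;
};

static void context_put(struct gpu_context *ctx) { ctx->refcount--; }

/* Hypothetical reap helper mirroring the order of operations above. */
static void reap_mapping(struct vram_mapping *m)
{
	assert(m->use == 0);          /* counterpart of WARN_ON(mapping->use) */

	printf("unmap iova 0x%lx, size 0x%lx\n", m->iova, m->size);
	m->iova = 0;                  /* stands in for drm_mm_remove_node() */

	context_put(m->context);      /* drop the context reference */
	m->context = NULL;            /* mark as reaped for later unmap calls */
}

int main(void)
{
	struct gpu_context ctx = { .refcount = 1 };
	struct vram_mapping m = { .context = &ctx, .use = 0,
	                          .iova = 0x100000, .size = 0x4000 };

	reap_mapping(&m);
	printf("context refcount %d, reaped: %s\n",
	       ctx.refcount, m.context ? "no" : "yes");
	return 0;
}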
169 * so we must keep its mapping. in etnaviv_iommu_find_iova()
201 * this mapping. in etnaviv_iommu_find_iova()
235 * When we can't insert the node, due to an existing mapping blocking in etnaviv_iommu_insert_exact()
242 * here to make space for the new mapping. in etnaviv_iommu_insert_exact()
266 struct etnaviv_vram_mapping *mapping, u64 va) in etnaviv_iommu_map_gem() argument
283 mapping->iova = iova; in etnaviv_iommu_map_gem()
284 mapping->context = etnaviv_iommu_context_get(context); in etnaviv_iommu_map_gem()
285 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_map_gem()
291 node = &mapping->vram_node; in etnaviv_iommu_map_gem()
300 mapping->iova = node->start; in etnaviv_iommu_map_gem()
309 mapping->context = etnaviv_iommu_context_get(context); in etnaviv_iommu_map_gem()
310 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_map_gem()
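Lines 283 and 300 show the two ways etnaviv_iommu_map_gem() picks an iova: on MMUv1 hardware a suitable contiguous buffer can be addressed directly through the linear window (in the real driver the iova is the buffer's DMA address relative to the window base), while the general path allocates a node in the context's address space and uses the node's start. In both cases the mapping only takes a context reference and is linked into context->mappings once the iova is settled (lines 284/285 and 309/310). A rough sketch of that decision, with a hypothetical alloc_node() helper and an illustrative window size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LINEAR_WINDOW_SIZE 0x80000000ULL   /* illustrative 2 GiB window */

/* Hypothetical address-space allocator for the general (page-table) path. */
static uint64_t alloc_node(uint64_t size)
{
	static uint64_t next = 0x100000;
	uint64_t start = next;
	next += size;
	return start;
}

/* Pick an iova: a direct linear-window offset when the buffer is contiguous
 * and fits under the window limit, otherwise a freshly allocated node. */
static uint64_t pick_iova(uint64_t paddr, uint64_t size, uint64_t window_base,
			  bool contiguous, bool mmu_v1)
{
	if (mmu_v1 && contiguous &&
	    paddr - window_base + size < LINEAR_WINDOW_SIZE)
		return paddr - window_base;  /* line 283: mapping->iova = iova */

	return alloc_node(size);             /* line 300: mapping->iova = node->start */
}

int main(void)
{
	printf("linear window: 0x%llx\n", (unsigned long long)
	       pick_iova(0x90001000ULL, 0x4000, 0x90000000ULL, true, true));
	printf("page tables:   0x%llx\n", (unsigned long long)
	       pick_iova(0x90001000ULL, 0x4000, 0x90000000ULL, false, true));
	return 0;
}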
318 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_unmap_gem() argument
320 WARN_ON(mapping->use); in etnaviv_iommu_unmap_gem()
324 /* Bail if the mapping has been reaped by another thread */ in etnaviv_iommu_unmap_gem()
325 if (!mapping->context) { in etnaviv_iommu_unmap_gem()
331 if (mapping->vram_node.mm == &context->mm) in etnaviv_iommu_unmap_gem()
332 etnaviv_iommu_remove_mapping(context, mapping); in etnaviv_iommu_unmap_gem()
334 list_del(&mapping->mmu_node); in etnaviv_iommu_unmap_gem()
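The unmap path (lines 318-334) has to cope with the mapping having been reaped by another thread in the meantime: if mapping->context is already NULL it simply bails (lines 324/325), otherwise it removes the address-space node only when that node actually belongs to this context's address space (line 331; the linear-window case never allocated one) and then unlinks the mapping. A condensed, self-contained sketch of that check with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; not the real etnaviv structures. */
struct gpu_context { int refcount; };

struct vram_mapping {
	struct gpu_context *context;  /* NULL if already reaped */
	bool has_vram_node;           /* false for linear-window mappings */
	unsigned long iova;
};

static void context_put(struct gpu_context *ctx) { ctx->refcount--; }

/* Bail when a concurrent reap already cleaned up; otherwise release the
 * address-space node (if one was allocated) and drop the context reference. */
static void unmap_buffer(struct gpu_context *ctx, struct vram_mapping *m)
{
	if (!m->context)              /* reaped by another thread: nothing to do */
		return;

	if (m->has_vram_node) {
		printf("unmap iova 0x%lx and free its node\n", m->iova);
		m->iova = 0;
	}

	m->context = NULL;            /* unlink the mapping */
	context_put(ctx);
}

int main(void)
{
	struct gpu_context ctx = { .refcount = 1 };
	struct vram_mapping m = { .context = &ctx, .has_vram_node = true,
	                          .iova = 0x100000 };

	unmap_buffer(&ctx, &m);
	unmap_buffer(&ctx, &m);       /* second call sees NULL context and bails */
	printf("refcount %d\n", ctx.refcount);
	return 0;
}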
396 struct etnaviv_vram_mapping *mapping, in etnaviv_iommu_get_suballoc_va() argument
402 if (mapping->use > 0) { in etnaviv_iommu_get_suballoc_va()
403 mapping->use++; in etnaviv_iommu_get_suballoc_va()
411 * window. Instead we manufacture a mapping to make it look uniform in etnaviv_iommu_get_suballoc_va()
415 mapping->iova = paddr - memory_base; in etnaviv_iommu_get_suballoc_va()
417 struct drm_mm_node *node = &mapping->vram_node; in etnaviv_iommu_get_suballoc_va()
426 mapping->iova = node->start; in etnaviv_iommu_get_suballoc_va()
438 list_add_tail(&mapping->mmu_node, &context->mappings); in etnaviv_iommu_get_suballoc_va()
439 mapping->use = 1; in etnaviv_iommu_get_suballoc_va()
447 struct etnaviv_vram_mapping *mapping) in etnaviv_iommu_put_suballoc_va() argument
449 struct drm_mm_node *node = &mapping->vram_node; in etnaviv_iommu_put_suballoc_va()
452 mapping->use--; in etnaviv_iommu_put_suballoc_va()
454 if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) { in etnaviv_iommu_put_suballoc_va()
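etnaviv_iommu_get_suballoc_va() and etnaviv_iommu_put_suballoc_va() (lines 396-454) form a refcounted pair: a repeat get just bumps mapping->use and reuses the existing iova (lines 402/403), while the put only tears anything down once the count drops to zero, and never on MMUv1, where the mapping was merely manufactured as an offset below the linear window (line 415) and nothing was written to the page tables. A sketch of that refcount discipline with simplified types and a placeholder in place of the real allocation:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; not the real etnaviv structures. */
struct suballoc_mapping {
	int use;                 /* count of active users */
	bool mmu_v1;             /* linear window only, nothing to tear down */
	unsigned long iova;
};

/* First get establishes the mapping, later gets only bump the count. */
static unsigned long get_suballoc_va(struct suballoc_mapping *m,
				     unsigned long paddr, unsigned long memory_base)
{
	if (m->use > 0) {
		m->use++;
		return m->iova;
	}

	if (m->mmu_v1)
		m->iova = paddr - memory_base;  /* manufactured linear-window iova */
	else
		m->iova = 0x100000;             /* placeholder for a real allocation + map */

	m->use = 1;
	return m->iova;
}

/* Tear down only when the last user drops out, and never for MMUv1. */
static void put_suballoc_va(struct suballoc_mapping *m)
{
	m->use--;
	if (m->use > 0 || m->mmu_v1)
		return;

	printf("unmapping suballoc iova 0x%lx\n", m->iova);
	m->iova = 0;
}

int main(void)
{
	struct suballoc_mapping m = { .use = 0, .mmu_v1 = false };

	get_suballoc_va(&m, 0x90001000UL, 0x90000000UL);
	get_suballoc_va(&m, 0x90001000UL, 0x90000000UL);  /* shared: use == 2 */
	put_suballoc_va(&m);                              /* still in use */
	put_suballoc_va(&m);                              /* last put unmaps */
	return 0;
}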