/linux-6.14.4/include/linux/

pagemap.h
    21  unsigned long invalidate_mapping_pages(struct address_space *mapping,
    30  int invalidate_inode_pages2(struct address_space *mapping);
    31  int invalidate_inode_pages2_range(struct address_space *mapping,
    35  int filemap_invalidate_pages(struct address_space *mapping,
    41  int filemap_fdatawait_keep_errors(struct address_space *mapping);
    43  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
    48  static inline int filemap_fdatawait(struct address_space *mapping)  in filemap_fdatawait() argument
    50  return filemap_fdatawait_range(mapping, 0, LLONG_MAX);  in filemap_fdatawait()
    54  int filemap_write_and_wait_range(struct address_space *mapping,
    56  int __filemap_fdatawrite_range(struct address_space *mapping,
        [all …]

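The pagemap.h hits above are the page-cache write-out and wait helpers. A minimal sketch of how a caller might drive them for one byte range (the wrapper name and structure are illustrative, not kernel code):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Start writeback for [start, end] and then wait for it to finish,
     * returning any writeback error recorded on the mapping. */
    static int flush_byte_range(struct address_space *mapping,
                                loff_t start, loff_t end)
    {
        int ret;

        ret = filemap_fdatawrite_range(mapping, start, end);
        if (ret)
            return ret;

        return filemap_fdatawait_range(mapping, start, end);
    }

filemap_write_and_wait_range() combines the two steps; the *_keep_errors variants wait without consuming the error flags, so a later fsync() still observes them.
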
io-mapping.h
    17   * The io_mapping mechanism provides an abstraction for mapping
    20   * See Documentation/driver-api/io-mapping.rst
    35   * For small address space machines, mapping large objects
    58  io_mapping_fini(struct io_mapping *mapping)  in io_mapping_fini() argument
    60  iomap_free(mapping->base, mapping->size);  in io_mapping_fini()
    65  io_mapping_map_atomic_wc(struct io_mapping *mapping,  in io_mapping_map_atomic_wc() argument
    70  BUG_ON(offset >= mapping->size);  in io_mapping_map_atomic_wc()
    71  phys_addr = mapping->base + offset;  in io_mapping_map_atomic_wc()
    77  return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);  in io_mapping_map_atomic_wc()
    92  io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)  in io_mapping_map_local_wc() argument
        [all …]

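Drivers usually embed a struct io_mapping and set it up once at probe time. A hedged sketch of the init/fini pairing (the struct and resource names here are invented for illustration):

    #include <linux/io-mapping.h>

    struct my_gpu {
        struct io_mapping lmem;     /* write-combined window onto device memory */
    };

    static int my_gpu_map_lmem(struct my_gpu *gpu, resource_size_t base,
                               unsigned long size)
    {
        /* io_mapping_init_wc() returns NULL if the region cannot be mapped. */
        if (!io_mapping_init_wc(&gpu->lmem, base, size))
            return -ENOMEM;
        return 0;
    }

    static void my_gpu_unmap_lmem(struct my_gpu *gpu)
    {
        io_mapping_fini(&gpu->lmem);
    }
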
tpm_eventlog.h
   166  void *mapping = NULL;  in __calc_tpm2_event_size() local
   186  mapping = TPM_MEMREMAP((unsigned long)marker_start,  in __calc_tpm2_event_size()
   188  if (!mapping) {  in __calc_tpm2_event_size()
   193  mapping = marker_start;  in __calc_tpm2_event_size()
   196  event = (struct tcg_pcr_event2_head *)mapping;  in __calc_tpm2_event_size()
   233  TPM_MEMUNMAP(mapping, mapping_size);  in __calc_tpm2_event_size()
   235  mapping = TPM_MEMREMAP((unsigned long)marker,  in __calc_tpm2_event_size()
   237  if (!mapping) {  in __calc_tpm2_event_size()
   242  mapping = marker;  in __calc_tpm2_event_size()
   245  memcpy(&halg, mapping, halg_size);  in __calc_tpm2_event_size()
        [all …]

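__calc_tpm2_event_size() maps each event header, reads the fields it needs, and unmaps again; the TPM_MEMREMAP()/TPM_MEMUNMAP() hooks are supplied by whoever includes the header. The sketch below shows the same map/read/unmap shape using plain memremap(), purely for illustration (the helper name is made up):

    #include <linux/io.h>
    #include <linux/tpm_eventlog.h>

    /* Read the digest count of a TPM2 event header at physical address 'pa'. */
    static u32 tpm2_event_digest_count(unsigned long pa)
    {
        struct tcg_pcr_event2_head *event;
        void *mapping;
        u32 count;

        mapping = memremap(pa, sizeof(*event), MEMREMAP_WB);
        if (!mapping)
            return 0;

        event = mapping;
        count = event->count;
        memunmap(mapping);
        return count;
    }
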
/linux-6.14.4/mm/

truncate.c
    26  static void clear_shadow_entries(struct address_space *mapping,  in clear_shadow_entries() argument
    29  XA_STATE(xas, &mapping->i_pages, start);  in clear_shadow_entries()
    33  if (shmem_mapping(mapping) || dax_mapping(mapping))  in clear_shadow_entries()
    38  spin_lock(&mapping->host->i_lock);  in clear_shadow_entries()
    48  if (mapping_shrinkable(mapping))  in clear_shadow_entries()
    49  inode_add_lru(mapping->host);  in clear_shadow_entries()
    50  spin_unlock(&mapping->host->i_lock);  in clear_shadow_entries()
    60  static void truncate_folio_batch_exceptionals(struct address_space *mapping,  in truncate_folio_batch_exceptionals() argument
    63  XA_STATE(xas, &mapping->i_pages, indices[0]);  in truncate_folio_batch_exceptionals()
    69  if (shmem_mapping(mapping))  in truncate_folio_batch_exceptionals()
        [all …]

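clear_shadow_entries() walks the mapping's XArray and drops the value ("shadow") entries left behind by reclaim. A stripped-down sketch of that walk, without the inode LRU and shrinker bookkeeping the real code does:

    #include <linux/fs.h>
    #include <linux/xarray.h>

    static void drop_shadow_entries(struct address_space *mapping,
                                    pgoff_t start, pgoff_t end)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, end) {
            if (xa_is_value(entry))      /* shadow entry, not a folio */
                xas_store(&xas, NULL);
        }
        xas_unlock_irq(&xas);
    }
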
filemap.c
   127  static void page_cache_delete(struct address_space *mapping,  in page_cache_delete() argument
   130  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
   133  mapping_set_update(&xas, mapping);  in page_cache_delete()
   143  folio->mapping = NULL;  in page_cache_delete()
   145  mapping->nrpages -= nr;  in page_cache_delete()
   148  static void filemap_unaccount_folio(struct address_space *mapping,  in filemap_unaccount_folio() argument
   161  if (mapping_exiting(mapping) && !folio_test_large(folio)) {  in filemap_unaccount_folio()
   190  filemap_nr_thps_dec(mapping);  in filemap_unaccount_folio()
   208  mapping_can_writeback(mapping)))  in filemap_unaccount_folio()
   209  folio_account_cleaned(folio, inode_to_wb(mapping->host));  in filemap_unaccount_folio()
        [all …]

readahead.c
   139  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init() argument
   141  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;  in file_ra_state_init()
   148  const struct address_space_operations *aops = rac->mapping->a_ops;  in read_pages()
   210  struct address_space *mapping = ractl->mapping;  in page_cache_ra_unbounded() local
   212  gfp_t gfp_mask = readahead_gfp_mask(mapping);  in page_cache_ra_unbounded()
   214  unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);  in page_cache_ra_unbounded()
   223  * filesystems already specify __GFP_NOFS in their mapping's  in page_cache_ra_unbounded()
   228  filemap_invalidate_lock_shared(mapping);  in page_cache_ra_unbounded()
   229  index = mapping_align_index(mapping, index);  in page_cache_ra_unbounded()
   252  struct folio *folio = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
        [all …]

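page_cache_ra_unbounded() allocates and inserts folios, then hands the batch to the filesystem via read_pages() and the mapping's ->readahead() hook. A hedged sketch of the consumer side, for a hypothetical filesystem that can satisfy reads without issuing I/O (real implementations queue bios and unlock each folio when the read completes):

    #include <linux/pagemap.h>

    static void myfs_readahead(struct readahead_control *rac)
    {
        struct folio *folio;

        /* readahead_folio() hands back each locked folio in the batch. */
        while ((folio = readahead_folio(rac)) != NULL) {
            /* ... fill the folio from wherever the data lives ... */
            folio_mark_uptodate(folio);
            folio_unlock(folio);
        }
    }
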
/linux-6.14.4/drivers/gpu/drm/panfrost/

panfrost_gem.c
     7  #include <linux/dma-mapping.h>
    59  struct panfrost_gem_mapping *iter, *mapping = NULL;  in panfrost_gem_mapping_get() local
    65  mapping = iter;  in panfrost_gem_mapping_get()
    71  return mapping;  in panfrost_gem_mapping_get()
    75  panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)  in panfrost_gem_teardown_mapping() argument
    77  if (mapping->active)  in panfrost_gem_teardown_mapping()
    78  panfrost_mmu_unmap(mapping);  in panfrost_gem_teardown_mapping()
    80  spin_lock(&mapping->mmu->mm_lock);  in panfrost_gem_teardown_mapping()
    81  if (drm_mm_node_allocated(&mapping->mmnode))  in panfrost_gem_teardown_mapping()
    82  drm_mm_remove_node(&mapping->mmnode);  in panfrost_gem_teardown_mapping()
        [all …]

/linux-6.14.4/arch/arm/mm/

dma-mapping.c
     3  * linux/arch/arm/mm/dma-mapping.c
     7  * DMA uncached mapping support.
   290  * Clear previous low-memory mapping to ensure that the  in dma_contiguous_remap()
   603  * Free a buffer as defined by the above mapping.
   677  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
   754  static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
   756  static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,  in __alloc_iova() argument
   762  size_t mapping_size = mapping->bits << PAGE_SHIFT;  in __alloc_iova()
   773  spin_lock_irqsave(&mapping->lock, flags);  in __alloc_iova()
   774  for (i = 0; i < mapping->nr_bitmaps; i++) {  in __alloc_iova()
        [all …]

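__alloc_iova() carves I/O virtual addresses out of per-mapping bitmaps under a spinlock. A simplified, single-bitmap sketch of the same technique (the structure and names are invented here, not the ARM code):

    #include <linux/bitmap.h>
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/spinlock.h>

    struct iova_pool {
        spinlock_t lock;
        unsigned long *bitmap;      /* one bit per IOVA page */
        unsigned int bits;
        dma_addr_t base;
    };

    static dma_addr_t iova_pool_alloc(struct iova_pool *pool, size_t size)
    {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long start, flags;

        spin_lock_irqsave(&pool->lock, flags);
        start = bitmap_find_next_zero_area(pool->bitmap, pool->bits, 0, count, 0);
        if (start >= pool->bits) {
            spin_unlock_irqrestore(&pool->lock, flags);
            return DMA_MAPPING_ERROR;
        }
        bitmap_set(pool->bitmap, start, count);
        spin_unlock_irqrestore(&pool->lock, flags);

        return pool->base + ((dma_addr_t)start << PAGE_SHIFT);
    }
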
flush.c
   199  void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)  in __flush_dcache_folio() argument
   202  * Writeback any data associated with the kernel mapping of this  in __flush_dcache_folio()
   204  * coherent with the kernels mapping.  in __flush_dcache_folio()
   234  if (mapping && cache_is_vipt_aliasing())  in __flush_dcache_folio()
   238  static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)  in __flush_dcache_aliases() argument
   248  * - aliasing VIPT: we only need to find one mapping of this page.  in __flush_dcache_aliases()
   253  flush_dcache_mmap_lock(mapping);  in __flush_dcache_aliases()
   254  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {  in __flush_dcache_aliases()
   281  flush_dcache_mmap_unlock(mapping);  in __flush_dcache_aliases()
   289  struct address_space *mapping;  in __sync_icache_dcache() local
        [all …]

/linux-6.14.4/drivers/gpu/drm/tegra/

uapi.c
    17  struct tegra_drm_mapping *mapping =  in tegra_drm_mapping_release() local
    20  host1x_bo_unpin(mapping->map);  in tegra_drm_mapping_release()
    21  host1x_bo_put(mapping->bo);  in tegra_drm_mapping_release()
    23  kfree(mapping);  in tegra_drm_mapping_release()
    26  void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)  in tegra_drm_mapping_put() argument
    28  kref_put(&mapping->ref, tegra_drm_mapping_release);  in tegra_drm_mapping_put()
    33  struct tegra_drm_mapping *mapping;  in tegra_drm_channel_context_close() local
    39  xa_for_each(&context->mappings, id, mapping)  in tegra_drm_channel_context_close()
    40  tegra_drm_mapping_put(mapping);  in tegra_drm_channel_context_close()
   189  struct tegra_drm_mapping *mapping;  in tegra_drm_ioctl_channel_map() local
        [all …]

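tegra_drm_mapping_put()/..._release() are the standard kref pattern: the final put runs the release callback, which unpins and frees the mapping. A generic sketch of that pattern (the type and field names are illustrative):

    #include <linux/container_of.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_mapping {
        struct kref ref;
        /* ... whatever the mapping pins: BO, IOVA, ... */
    };

    static void my_mapping_release(struct kref *ref)
    {
        struct my_mapping *mapping = container_of(ref, struct my_mapping, ref);

        /* undo the pinning done when the mapping was created */
        kfree(mapping);
    }

    static void my_mapping_put(struct my_mapping *mapping)
    {
        /* runs my_mapping_release() when the refcount drops to zero */
        kref_put(&mapping->ref, my_mapping_release);
    }
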
/linux-6.14.4/include/trace/events/

filemap.h
    32  __entry->i_ino = folio->mapping->host->i_ino;
    34  if (folio->mapping->host->i_sb)
    35  __entry->s_dev = folio->mapping->host->i_sb->s_dev;
    37  __entry->s_dev = folio->mapping->host->i_rdev;
    62  struct address_space *mapping,
    67  TP_ARGS(mapping, index, last_index),
    77  __entry->i_ino = mapping->host->i_ino;
    78  if (mapping->host->i_sb)
    80  mapping->host->i_sb->s_dev;
    82  __entry->s_dev = mapping->host->i_rdev;
        [all …]

/linux-6.14.4/Documentation/admin-guide/mm/

nommu-mmap.rst
     2  No-MMU memory mapping support
     5  The kernel has limited support for memory mapping under no-MMU conditions, such
     7  mapping is made use of in conjunction with the mmap() system call, the shmat()
     9  mapping is actually performed by the binfmt drivers, which call back into the
    12  Memory mapping behaviour also involves the way fork(), vfork(), clone() and
    19  (#) Anonymous mapping, MAP_PRIVATE
    27  (#) Anonymous mapping, MAP_SHARED
    37  the underlying file are reflected in the mapping; copied across fork.
    41  - If one exists, the kernel will re-use an existing mapping to the
    45  - If possible, the file mapping will be directly on the backing device
        [all …]

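The document describes how anonymous MAP_PRIVATE and MAP_SHARED mappings behave with and without an MMU. The userspace calls are identical in both cases; a minimal example of the two flavours (error handling trimmed to the essentials):

    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;

        /* Private anonymous mapping: on no-MMU, copied (not COWed) across fork(). */
        char *priv = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* Shared anonymous mapping: visible to parent and child after fork(). */
        char *shared = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        if (priv == MAP_FAILED || shared == MAP_FAILED)
            return 1;

        strcpy(shared, "seen by the child");
        munmap(priv, len);
        munmap(shared, len);
        return 0;
    }
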
/linux-6.14.4/drivers/gpu/drm/etnaviv/

etnaviv_mmu.c
     6  #include <linux/dma-mapping.h>
    53  /* unroll mapping in case something went wrong */  in etnaviv_context_map()
   113  struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_remove_mapping() argument
   115  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_iommu_remove_mapping()
   119  etnaviv_iommu_unmap(context, mapping->vram_node.start,  in etnaviv_iommu_remove_mapping()
   121  drm_mm_remove_node(&mapping->vram_node);  in etnaviv_iommu_remove_mapping()
   124  void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)  in etnaviv_iommu_reap_mapping() argument
   126  struct etnaviv_iommu_context *context = mapping->context;  in etnaviv_iommu_reap_mapping()
   129  WARN_ON(mapping->use);  in etnaviv_iommu_reap_mapping()
   131  etnaviv_iommu_remove_mapping(context, mapping);  in etnaviv_iommu_reap_mapping()
        [all …]

etnaviv_gem.c
     7  #include <linux/dma-mapping.h>
   218  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_get_vram_mapping() local
   220  list_for_each_entry(mapping, &obj->vram_list, obj_node) {  in etnaviv_gem_get_vram_mapping()
   221  if (mapping->context == context)  in etnaviv_gem_get_vram_mapping()
   222  return mapping;  in etnaviv_gem_get_vram_mapping()
   228  void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)  in etnaviv_gem_mapping_unreference() argument
   230  struct etnaviv_gem_object *etnaviv_obj = mapping->object;  in etnaviv_gem_mapping_unreference()
   233  WARN_ON(mapping->use == 0);  in etnaviv_gem_mapping_unreference()
   234  mapping->use -= 1;  in etnaviv_gem_mapping_unreference()
   245  struct etnaviv_vram_mapping *mapping;  in etnaviv_gem_mapping_get() local
        [all …]

/linux-6.14.4/drivers/media/usb/uvc/

uvc_ctrl.c
   373  * device. The custom menu_mapping in the control @mapping is used when
   384  static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping,  in uvc_mapping_get_menu_value() argument
   387  if (!test_bit(idx, &mapping->menu_mask))  in uvc_mapping_get_menu_value()
   390  if (mapping->menu_mapping)  in uvc_mapping_get_menu_value()
   391  return mapping->menu_mapping[idx];  in uvc_mapping_get_menu_value()
   397  uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)  in uvc_mapping_get_menu_name() argument
   399  if (!test_bit(idx, &mapping->menu_mask))  in uvc_mapping_get_menu_name()
   402  if (mapping->menu_names)  in uvc_mapping_get_menu_name()
   403  return mapping->menu_names[idx];  in uvc_mapping_get_menu_name()
   405  return v4l2_ctrl_get_menu(mapping->id)[idx];  in uvc_mapping_get_menu_name()
        [all …]

/linux-6.14.4/Documentation/driver-api/

io-mapping.rst
     8  The io_mapping functions in linux/io-mapping.h provide an abstraction for
     9  efficiently mapping small regions of an I/O device to the CPU. The initial
    14  A mapping object is created during driver initialization using::
    20  mappable, while 'size' indicates how large a mapping region to
    23  This _wc variant provides a mapping which may only be used with
    27  With this mapping object, individual pages can be mapped either temporarily
    31  void *io_mapping_map_local_wc(struct io_mapping *mapping,
    34  void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
    37  'offset' is the offset within the defined mapping region. Accessing
    46  Temporary mappings are only valid in the context of the caller. The mapping
        [all …]

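Putting the documented calls together, a minimal usage sketch — the BAR base/size and page offset are placeholders for whatever the driver discovers from its device:

    #include <linux/io-mapping.h>

    static int touch_one_page(resource_size_t bar_base, unsigned long bar_size,
                              unsigned long offset)
    {
        struct io_mapping *map;
        void *vaddr;

        map = io_mapping_create_wc(bar_base, bar_size);
        if (!map)
            return -ENOMEM;

        vaddr = io_mapping_map_local_wc(map, offset);
        /* ... write-combined access to one page of the region ... */
        io_mapping_unmap_local(vaddr);

        io_mapping_free(map);
        return 0;
    }

The local (per-CPU) variant is preferred over the atomic one in current kernels; both require the page to be unmapped by the same context that mapped it.
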
/linux-6.14.4/drivers/gpu/drm/exynos/

exynos_drm_dma.c
    34  * drm_iommu_attach_device - attach device to iommu mapping
    40  * mapping.
    57  * Keep the original DMA mapping of the sub-device and  in drm_iommu_attach_device()
    66  ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);  in drm_iommu_attach_device()
    68  ret = iommu_attach_device(priv->mapping, subdrv_dev);  in drm_iommu_attach_device()
    75  * drm_iommu_detach_device - detach device address space mapping from device
    81  * mapping
    92  iommu_detach_device(priv->mapping, subdrv_dev);  in drm_iommu_detach_device()
   102  DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",  in exynos_drm_register_dma()
   109  if (!priv->mapping) {  in exynos_drm_register_dma()
        [all …]

/linux-6.14.4/Documentation/filesystems/iomap/

design.rst
    70  of mapping function calls into the filesystem across a larger amount of
    78  1. Obtain a space mapping via ``->iomap_begin``
    82  1. Revalidate the mapping and go back to (1) above, if necessary.
    89  4. Release the mapping via ``->iomap_end``, if necessary
   130  * **filesystem mapping lock**: This synchronization primitive is
   131  internal to the filesystem and must protect the file mapping data
   132  from updates while a mapping is being sampled.
   138  mapping.
   154  The filesystem communicates to the iomap iterator the mapping of
   177  bytes, covered by this mapping.
        [all …]

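The ->iomap_begin/->iomap_end pair described above is what a filesystem implements to report one mapping per iteration. A skeletal iomap_ops sketch, with the begin hook reporting a hole covering the whole request (a real filesystem fills in type/addr from its extent tree under its mapping lock; "myfs" is hypothetical):

    #include <linux/iomap.h>

    static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                                unsigned flags, struct iomap *iomap,
                                struct iomap *srcmap)
    {
        /* Describe the mapping that covers 'pos'; here, a hole spanning the
         * whole requested range. */
        iomap->type = IOMAP_HOLE;
        iomap->addr = IOMAP_NULL_ADDR;
        iomap->offset = pos;
        iomap->length = length;
        return 0;
    }

    static int myfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                              ssize_t written, unsigned flags, struct iomap *iomap)
    {
        /* Release locks/reservations taken in ->iomap_begin, if any. */
        return 0;
    }

    static const struct iomap_ops myfs_iomap_ops = {
        .iomap_begin = myfs_iomap_begin,
        .iomap_end   = myfs_iomap_end,
    };
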
/linux-6.14.4/tools/testing/selftests/arm64/mte/

check_mmap_options.c
    60  static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)  in check_anonymous_memory_mapping() argument
    69  map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);  in check_anonymous_memory_mapping()
    91  static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)  in check_file_memory_mapping() argument
   105  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_file_memory_mapping()
   130  static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)  in check_clear_prot_mte_flag() argument
   140  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
   161  ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,  in check_clear_prot_mte_flag()
   213  "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");  in main()
   215  …"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n…  in main()
   219  "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");  in main()
        [all …]

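The selftest's helpers take a 'mapping' argument (MAP_PRIVATE or MAP_SHARED) and allocate memory that is either tagged at mmap() time or tagged later with mprotect(). A hedged userspace sketch of those two paths; PROT_MTE is arm64-specific and the fallback define here is only for illustration:

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20            /* arm64 <asm/mman.h> */
    #endif

    /* Allocate anonymous memory with MTE tagging enabled, either directly at
     * mmap() time or afterwards via mprotect(). */
    static void *alloc_tagged(size_t size, int mapping, int via_mprotect)
    {
        int prot = PROT_READ | PROT_WRITE;
        void *p;

        p = mmap(NULL, size, via_mprotect ? prot : prot | PROT_MTE,
                 mapping | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return NULL;

        if (via_mprotect && mprotect(p, size, prot | PROT_MTE)) {
            munmap(p, size);
            return NULL;
        }
        return p;
    }
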
check_child_memory.c
    84  static int check_child_memory_mapping(int mem_type, int mode, int mapping)  in check_child_memory_mapping() argument
    93  ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,  in check_child_memory_mapping()
   106  static int check_child_file_mapping(int mem_type, int mode, int mapping)  in check_child_file_mapping() argument
   119  map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);  in check_child_file_mapping()
   170  "Check child anonymous memory with private mapping, precise mode and mmap memory\n");  in main()
   172  "Check child anonymous memory with shared mapping, precise mode and mmap memory\n");  in main()
   174  "Check child anonymous memory with private mapping, imprecise mode and mmap memory\n");  in main()
   176  "Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n");  in main()
   178  "Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n");  in main()
   180  "Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n");  in main()
        [all …]

/linux-6.14.4/tools/testing/selftests/mm/

mremap_dontunmap.c
    59  "unable to unmap destination mapping");  in kernel_support_for_mremap_dontunmap()
    63  "unable to unmap source mapping");  in kernel_support_for_mremap_dontunmap()
    67  // This helper will just validate that an entire mapping contains the expected
    94  // the source mapping mapped.
   106  // Try to just move the whole mapping anywhere (not fixed).  in mremap_dontunmap_simple()
   122  "unable to unmap destination mapping");  in mremap_dontunmap_simple()
   124  "unable to unmap source mapping");  in mremap_dontunmap_simple()
   128  // This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected.
   148  // Try to just move the whole mapping anywhere (not fixed).  in mremap_dontunmap_simple_shmem()
   155  "unable to unmap source mapping");  in mremap_dontunmap_simple_shmem()
        [all …]

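The test moves a mapping with MREMAP_DONTUNMAP, which relocates the pages but leaves the source VMA in place (it reads back zero-filled afterwards). The core call it exercises looks roughly like this sketch (the helper name is illustrative):

    #define _GNU_SOURCE
    #include <sys/mman.h>

    #ifndef MREMAP_DONTUNMAP
    #define MREMAP_DONTUNMAP 4       /* available since Linux 5.7 */
    #endif

    /* Move 'len' bytes starting at 'src' somewhere else, keeping the source
     * mapping mapped (its pages are transferred to the destination). */
    static void *move_keep_source(void *src, size_t len)
    {
        return mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
    }
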
/linux-6.14.4/arch/arm64/kvm/

pkvm.c
   288  struct pkvm_mapping *mapping;  in find_first_mapping_node() local
   291  mapping = rb_entry(node, struct pkvm_mapping, node);  in find_first_mapping_node()
   292  if (mapping->gfn == gfn)  in find_first_mapping_node()
   295  node = (gfn < mapping->gfn) ? node->rb_left : node->rb_right;  in find_first_mapping_node()
   333  struct pkvm_mapping *mapping;  in pkvm_pgtable_stage2_destroy() local
   341  mapping = rb_entry(node, struct pkvm_mapping, node);  in pkvm_pgtable_stage2_destroy()
   342  kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);  in pkvm_pgtable_stage2_destroy()
   344  rb_erase(&mapping->node, &pgt->pkvm_mappings);  in pkvm_pgtable_stage2_destroy()
   345  kfree(mapping);  in pkvm_pgtable_stage2_destroy()
   354  struct pkvm_mapping *mapping = NULL;  in pkvm_pgtable_stage2_map() local
        [all …]

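find_first_mapping_node() is a plain rb-tree search keyed by gfn. For context, the insertion side of such a tree looks like the sketch below (it reuses the struct pkvm_mapping field names for illustration only; this is not the pKVM code):

    #include <linux/rbtree.h>

    static void mapping_tree_insert(struct rb_root *root, struct pkvm_mapping *new)
    {
        struct rb_node **link = &root->rb_node, *parent = NULL;

        /* Walk down to the leaf slot that keeps the tree ordered by gfn. */
        while (*link) {
            struct pkvm_mapping *m = rb_entry(*link, struct pkvm_mapping, node);

            parent = *link;
            link = (new->gfn < m->gfn) ? &(*link)->rb_left : &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
    }
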
/linux-6.14.4/fs/gfs2/

aops.c
    95  struct inode * const inode = folio->mapping->host;  in gfs2_write_jdata_folio()
   126  struct inode *inode = folio->mapping->host;  in __gfs2_jdata_write_folio()
   143  * @mapping: The mapping to write
   148  static int gfs2_writepages(struct address_space *mapping,  in gfs2_writepages() argument
   151  struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);  in gfs2_writepages()
   161  ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);  in gfs2_writepages()
   169  * @mapping: The mapping
   177  static int gfs2_write_jdata_batch(struct address_space *mapping,  in gfs2_write_jdata_batch() argument
   182  struct inode *inode = mapping->host;  in gfs2_write_jdata_batch()
   205  if (unlikely(folio->mapping != mapping)) {  in gfs2_write_jdata_batch()
        [all …]

/linux-6.14.4/drivers/sh/clk/

core.c
   340  struct clk_mapping *mapping = clk->mapping;  in clk_establish_mapping() local
   345  if (!mapping) {  in clk_establish_mapping()
   349  * dummy mapping for root clocks with no specified ranges  in clk_establish_mapping()
   352  clk->mapping = &dummy_mapping;  in clk_establish_mapping()
   357  * If we're on a child clock and it provides no mapping of its  in clk_establish_mapping()
   358  * own, inherit the mapping from its root clock.  in clk_establish_mapping()
   361  mapping = clkp->mapping;  in clk_establish_mapping()
   362  BUG_ON(!mapping);  in clk_establish_mapping()
   366  * Establish initial mapping.  in clk_establish_mapping()
   368  if (!mapping->base && mapping->phys) {  in clk_establish_mapping()
        [all …]

/linux-6.14.4/fs/

dax.c
   177  * @entry may no longer be the entry at the index in the mapping.
   325  return page->mapping == PAGE_MAPPING_DAX_SHARED;  in dax_page_is_shared()
   329  * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
   334  if (page->mapping != PAGE_MAPPING_DAX_SHARED) {  in dax_page_share_get()
   339  if (page->mapping)  in dax_page_share_get()
   341  page->mapping = PAGE_MAPPING_DAX_SHARED;  in dax_page_share_get()
   353  * whether this entry is shared by multiple files. If so, set the page->mapping
   356  static void dax_associate_entry(void *entry, struct address_space *mapping,  in dax_associate_entry() argument
   372  WARN_ON_ONCE(page->mapping);  in dax_associate_entry()
   373  page->mapping = mapping;  in dax_associate_entry()
        [all …]