/linux-6.14.4/block/ |
D | blk-map.c |
      22  gfp_t gfp_mask) in bio_alloc_map_data()
     132  struct iov_iter *iter, gfp_t gfp_mask) in bio_copy_user_iov()
     254  unsigned int nr_vecs, gfp_t gfp_mask) in blk_rq_map_bio_alloc()
     273  gfp_t gfp_mask) in bio_map_user_iov()
     331  unsigned int len, gfp_t gfp_mask) in bio_map_kern()
     415  unsigned int len, gfp_t gfp_mask, int reading) in bio_copy_kern()
     549  const struct iov_iter *iter, gfp_t gfp_mask) in blk_rq_map_user_iov()
     607  unsigned long len, gfp_t gfp_mask) in blk_rq_map_user()
     620  void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask, in blk_rq_map_user_io()
     704  unsigned int len, gfp_t gfp_mask) in blk_rq_map_kern()
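These helpers attach user or kernel buffers to block-layer requests, allocating the bios and any bounce pages with the caller's gfp_mask. Below is a minimal sketch (not taken from these files) of driving blk_rq_map_kern() from a passthrough-style path; the request queue, the REQ_OP_DRV_OUT opcode and the buffer are assumptions for illustration.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical helper: map a kernel buffer into a passthrough request.
 * blk_rq_map_kern() maps or bounce-copies the buffer using the gfp_mask
 * supplied here. */
static int send_kernel_buffer(struct request_queue *q, void *buf,
                              unsigned int len)
{
        struct request *rq;
        blk_status_t status;
        int ret;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (ret)
                goto out_free;

        status = blk_execute_rq(rq, false);     /* synchronous dispatch */
        ret = blk_status_to_errno(status);
out_free:
        blk_mq_free_request(rq);
        return ret;
}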
|
D | blk-lib.c |
      39  sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) in blk_alloc_discard_bio()
      64  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) in __blkdev_issue_discard()
      86  sector_t nr_sects, gfp_t gfp_mask) in blkdev_issue_discard()
     122  sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes()
     196  sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_zero_pages()
     274  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, in __blkdev_issue_zeroout()
     309  sector_t nr_sects, gfp_t gfp_mask, unsigned flags) in blkdev_issue_zeroout()
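The discard and zeroout helpers above build and submit their own bios, so the gfp_mask passed in decides whether those internal allocations may sleep and enter reclaim. A hedged usage sketch, assuming bdev is a block device the caller already holds open:

#include <linux/blkdev.h>

/* Discard a range, then force real zeroes over it. GFP_KERNEL lets both
 * helpers sleep while allocating the bios they submit. */
static int trim_then_zero(struct block_device *bdev, sector_t start,
                          sector_t nr_sects)
{
        int ret;

        ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
        if (ret)
                return ret;

        /* BLKDEV_ZERO_NOUNMAP: write zeroes rather than deallocating. */
        return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
                                    BLKDEV_ZERO_NOUNMAP);
}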
|
D | blk-crypto.c |
      92  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) in bio_crypt_set_ctx()
     116  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone()
     304  gfp_t gfp_mask) in __blk_crypto_rq_bio_prep()
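bio_crypt_set_ctx() allocates a per-bio inline-encryption context with the supplied gfp_mask. A small sketch of attaching one before bio submission; it assumes key was prepared elsewhere with blk_crypto_init_key() and that the kernel is built with CONFIG_BLK_INLINE_ENCRYPTION.

#include <linux/blk-crypto.h>
#include <linux/bio.h>

/* GFP_NOIO is typical here: this often runs on the writeback path, where
 * recursing back into the block layer during reclaim would deadlock. */
static void attach_crypto_ctx(struct bio *bio,
                              const struct blk_crypto_key *key, u64 first_dun)
{
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

        bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}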
|
/linux-6.14.4/include/linux/ |
D | gfp.h |
     241  static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) in warn_if_node_offline()
     260  __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node_noprof()
     286  static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, in alloc_pages_node_noprof()
     305  static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) in alloc_pages_noprof()
     327  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) argument
     353  #define __get_free_page(gfp_mask) \ argument
     356  #define __get_dma_pages(gfp_mask, order) \ argument
     395  static inline bool gfp_compaction_allowed(gfp_t gfp_mask) in gfp_compaction_allowed()
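These are the core page-allocator entry points; the _noprof variants sit behind the alloc_pages()/alloc_page() wrappers that callers normally use. A minimal usage sketch (the GFP choices are illustrative only):

#include <linux/gfp.h>

/* Order-0 allocation that may sleep and reclaim. */
static struct page *grab_one_page(void)
{
        return alloc_pages(GFP_KERNEL, 0);      /* same as alloc_page(GFP_KERNEL) */
}

/* Atomic-context allocation returning a kernel virtual address. */
static unsigned long grab_one_addr(void)
{
        return __get_free_page(GFP_ATOMIC);
}

static void drop_them(struct page *page, unsigned long addr)
{
        if (page)
                __free_pages(page, 0);
        if (addr)
                free_page(addr);
}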
|
D | page_frag_cache.h |
      46  unsigned int fragsz, gfp_t gfp_mask, in page_frag_alloc_align()
      54  unsigned int fragsz, gfp_t gfp_mask) in page_frag_alloc()
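page_frag_alloc() carves many small buffers out of one compound page and refills the cache with the gfp_mask given per call. A sketch, assuming a context (such as a networking datapath) that cannot sleep:

#include <linux/page_frag_cache.h>
#include <linux/gfp.h>

static struct page_frag_cache frag_cache;       /* a zeroed cache starts out empty */

static void *grab_frag(unsigned int size)
{
        /* GFP_ATOMIC: refill without sleeping if the current page is used up. */
        return page_frag_alloc(&frag_cache, size, GFP_ATOMIC);
}

static void drop_frag(void *buf)
{
        page_frag_free(buf);    /* drops the reference on the backing page */
}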
|
D | cpuset.h |
      87  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
      92  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
     226  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
     231  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
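cpuset_zone_allowed() is the per-zone check the page allocator applies when the calling task is confined to a cpuset. A sketch of the kind of zonelist walk that uses it; the counting itself is only for illustration:

#include <linux/cpuset.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

/* Count zones in a zonelist that the current task's cpuset permits for this
 * gfp_mask. */
static unsigned int count_allowed_zones(struct zonelist *zonelist,
                                        gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;
        unsigned int allowed = 0;

        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
                if (cpuset_zone_allowed(zone, gfp_mask))
                        allowed++;

        return allowed;
}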
|
D | page_owner.h |
      29  unsigned short order, gfp_t gfp_mask) in set_page_owner()
      61  unsigned short order, gfp_t gfp_mask) in set_page_owner()
|
/linux-6.14.4/fs/nfs/blocklayout/ |
D | dev.c |
     294  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple()
     385  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi()
     439  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice()
     455  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat()
     484  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe()
     513  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_deviceid()
     536  gfp_t gfp_mask) in bl_alloc_deviceid_node()
|
/linux-6.14.4/mm/ |
D | mempool.c |
     197  gfp_t gfp_mask, int node_id) in mempool_init_node()
     272  gfp_t gfp_mask, int node_id) in mempool_create_node_noprof()
     384  void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc_noprof()
     555  void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) in mempool_alloc_slab()
     574  void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) in mempool_kmalloc()
     587  void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data) in mempool_kvmalloc()
     604  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) in mempool_alloc_pages()
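A mempool guarantees a minimum number of pre-allocated elements so that allocations on critical paths (typically I/O completion or writeback) cannot fail outright; the gfp_mask passed to mempool_alloc() is first tried against the normal allocator through hooks such as mempool_alloc_slab() above. A hedged sketch with a hypothetical element type and cache name:

#include <linux/mempool.h>
#include <linux/slab.h>

struct io_unit {
        int tag;
        char payload[64];
};

static struct kmem_cache *io_cache;
static mempool_t *io_pool;

static int io_pool_setup(void)
{
        io_cache = kmem_cache_create("io_unit_cache", sizeof(struct io_unit),
                                     0, 0, NULL);
        if (!io_cache)
                return -ENOMEM;

        /* Keep at least 4 elements in reserve even under memory pressure. */
        io_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
                                 io_cache);
        if (!io_pool) {
                kmem_cache_destroy(io_cache);
                return -ENOMEM;
        }
        return 0;
}

static void io_pool_use(void)
{
        /* GFP_NOIO: do not recurse into the I/O path while allocating. */
        struct io_unit *iu = mempool_alloc(io_pool, GFP_NOIO);

        if (iu)
                mempool_free(iu, io_pool);
}

static void io_pool_teardown(void)
{
        mempool_destroy(io_pool);
        kmem_cache_destroy(io_cache);
}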
|
D | page_alloc.c |
    3212  unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast()
    3291  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment()
    3323  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma()
    3338  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist()
    3511  static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem()
    3530  void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc()
    3556  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback()
    3575  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom()
    3670  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact()
    3795  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact()
    [all …]
|
D | page_frag_cache.c |
      50  gfp_t gfp_mask) in __page_frag_cache_refill()
      94  unsigned int fragsz, gfp_t gfp_mask, in __page_frag_alloc_align()
|
D | swap.h |
     133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead()
     138  static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead()
     177  gfp_t gfp_mask, void **shadowp) in add_to_swap_cache()
|
D | page_owner.c |
      27  gfp_t gfp_mask; member
     166  gfp_t gfp_mask) in add_stack_record_to_list()
     194  static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, in inc_stack_record_count()
     235  gfp_t gfp_mask, in __update_page_owner_handle()
     314  gfp_t gfp_mask) in __set_page_owner()
     598  gfp_t gfp_mask; in __dump_page_owner() local
|
D | swap_state.c |
     431  struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async()
     557  struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async()
     655  struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead()
     798  static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, in swap_vma_readahead()
     871  struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead()
|
D | readahead.c |
     182  gfp_t gfp_mask, unsigned int order) in ractl_alloc_folio()
     212  gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded() local
     747  gfp_t gfp_mask = readahead_gfp_mask(mapping); in readahead_expand() local
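Both local variables above derive their mask from readahead_gfp_mask(), which ORs __GFP_NORETRY | __GFP_NOWARN into the mapping's mask so speculative readahead fails quietly under memory pressure instead of stalling the reader. A small sketch of the same pattern:

#include <linux/pagemap.h>

/* Allocate an order-0 folio the way the readahead paths do. */
static struct folio *ra_alloc_folio(struct address_space *mapping)
{
        gfp_t gfp_mask = readahead_gfp_mask(mapping);

        return filemap_alloc_folio(gfp_mask, 0);
}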
|
/linux-6.14.4/fs/btrfs/ |
D | ulist.c |
      99  struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc()
     111  void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask) in ulist_prealloc()
     201  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add()
     207  u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge()
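A ulist is btrfs's small set of unique u64 values; ulist_add() takes a gfp_mask because each new value may allocate a node. A sketch of internal-style usage (GFP_NOFS because btrfs calls these while holding filesystem locks):

#include <linux/gfp.h>
#include <linux/printk.h>
#include "ulist.h"      /* btrfs-local header */

static int collect_roots(void)
{
        struct ulist *roots;
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int ret;

        roots = ulist_alloc(GFP_NOFS);
        if (!roots)
                return -ENOMEM;

        /* Returns 1 if added, 0 if the value was already present. */
        ret = ulist_add(roots, 256, 0, GFP_NOFS);
        if (ret < 0)
                goto out;

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(roots, &uiter)))
                pr_debug("root %llu\n", node->val);

        ret = 0;
out:
        ulist_free(roots);
        return ret;
}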
|
/linux-6.14.4/net/sunrpc/auth_gss/ |
D | gss_krb5_keys.c |
     152  const struct xdr_netobj *in_constant, gfp_t gfp_mask) in krb5_DK()
     271  gfp_t gfp_mask) in krb5_derive_key_v2()
     372  gfp_t gfp_mask) in krb5_kdf_feedback_cmac()
     504  gfp_t gfp_mask) in krb5_kdf_hmac_sha2()
|
D | gss_krb5_mech.c |
     297  gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask) in gss_krb5_import_ctx_v2()
     396  gfp_t gfp_mask) in gss_import_v2_context()
     470  time64_t *endtime, gfp_t gfp_mask) in gss_krb5_import_sec_context()
|
/linux-6.14.4/lib/ |
D | generic-radix-tree.c |
      24  gfp_t gfp_mask) in __genradix_ptr_alloc()
     211  gfp_t gfp_mask) in __genradix_prealloc()
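A genradix is a sparse, radix-tree-backed array of fixed-size entries; __genradix_ptr_alloc() allocates backing pages on demand with the caller's gfp_mask. A sketch using the public wrappers from include/linux/generic-radix-tree.h, with a hypothetical entry type:

#include <linux/generic-radix-tree.h>
#include <linux/types.h>

struct item {
        u64 key;
        u32 flags;
};

static GENRADIX(struct item) items;     /* zero-initialized, same as genradix_init() */

static int store_item(size_t idx, u64 key)
{
        struct item *it = genradix_ptr_alloc(&items, idx, GFP_KERNEL);

        if (!it)
                return -ENOMEM;         /* backing page allocation failed */
        it->key = key;
        return 0;
}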
|
D | scatterlist.c |
     152  static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc()
     288  unsigned int nents_first_chunk, gfp_t gfp_mask, in __sg_alloc_table()
     375  int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table()
     390  gfp_t gfp_mask) in get_next_sg()
     458  unsigned int left_pages, gfp_t gfp_mask) in sg_alloc_append_table_from_pages()
     582  gfp_t gfp_mask) in sg_alloc_table_from_pages_segment()
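sg_alloc_table() allocates the scatterlist entries themselves (chunked and chained for large nents) with the given gfp_mask. A sketch that builds a table over pages the caller already owns:

#include <linux/scatterlist.h>

static int build_sg(struct page **pages, unsigned int n)
{
        struct sg_table table;
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        ret = sg_alloc_table(&table, n, GFP_KERNEL);
        if (ret)
                return ret;

        for_each_sg(table.sgl, sg, n, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... hand table.sgl to DMA mapping or a driver here ... */

        sg_free_table(&table);
        return 0;
}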
|
/linux-6.14.4/include/linux/sched/ |
D | mm.h |
     278  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire()
     279  static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release()
     316  static inline void might_alloc(gfp_t gfp_mask) in might_alloc()
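might_alloc() documents, and with lockdep enabled verifies, that a function may enter reclaim for the given gfp_mask, so GFP_KERNEL callers in atomic context are caught even when the allocation happens to succeed without sleeping. A minimal wrapper sketch:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *checked_alloc(size_t size, gfp_t gfp_mask)
{
        might_alloc(gfp_mask);  /* fires the same checks the allocator would */

        return kmalloc(size, gfp_mask);
}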
|
/linux-6.14.4/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c |
      99  gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages()
     115  int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent()
     133  gfp_t gfp_mask, int coherent) in mlx4_alloc_icm()
|
/linux-6.14.4/kernel/power/ |
D | snapshot.c |
     191  static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page()
     210  static void *__get_safe_page(gfp_t gfp_mask) in __get_safe_page()
     222  unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page()
     227  static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page()
     297  gfp_t gfp_mask; /* mask for allocating pages */ member
     301  static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask, in chain_init()
     442  static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed, in alloc_rtree_node()
     468  static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, in add_rtree_block()
     538  static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask, in create_zone_bm_rtree()
     628  static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) in create_mem_extents()
    [all …]
|
/linux-6.14.4/drivers/connector/ |
D | connector.c |
      62  gfp_t gfp_mask, netlink_filter_fn filter, in cn_netlink_send_mult()
     124  gfp_t gfp_mask) in cn_netlink_send()
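cn_netlink_send() broadcasts a connector message to a netlink group, allocating the skb with the caller's gfp_mask. A sketch with a hypothetical connector idx/val pair:

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int send_event(const void *payload, u16 len)
{
        struct cn_msg *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->id.idx = 0x123;    /* hypothetical connector index */
        msg->id.val = 0x1;      /* hypothetical connector value */
        msg->len = len;
        memcpy(msg->data, payload, len);

        /* portid 0, group == idx: both our kzalloc() and the skb allocation
         * inside the connector core use GFP_KERNEL here. */
        ret = cn_netlink_send(msg, 0, msg->id.idx, GFP_KERNEL);
        kfree(msg);
        return ret;
}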
|
/linux-6.14.4/rust/helpers/ |
D | page.c | 6 struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order) in rust_helper_alloc_pages()
|