
Searched refs:blk_opf_t (Results 1 – 25 of 94) sorted by relevance

/linux-6.14.4/include/linux/
blk_types.h
    205: typedef __u32 __bitwise blk_opf_t;
    217: blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
    309: #define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
    327: REQ_OP_READ = (__force blk_opf_t)0,
    329: REQ_OP_WRITE = (__force blk_opf_t)1,
    331: REQ_OP_FLUSH = (__force blk_opf_t)2,
    333: REQ_OP_DISCARD = (__force blk_opf_t)3,
    335: REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
    337: REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
    339: REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
    [all …]
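
The blk_types.h hits show the basic encoding: a blk_opf_t packs one REQ_OP_* operation into the low REQ_OP_BITS and request flags (REQ_SYNC, REQ_FUA, ...) into the upper bits, with REQ_OP_MASK splitting the two. A minimal sketch of building and decoding such a value follows; demo_opf is a hypothetical name, while bio_op(), op_is_write() and the REQ_* constants are the helpers this header provides.

#include <linux/blk_types.h>
#include <linux/printk.h>

/* Hypothetical helper: build a write opf and take it apart again. */
static void demo_opf(struct bio *bio)
{
	/* Low REQ_OP_BITS carry the operation; the upper bits carry flags. */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	bio->bi_opf = opf;

	/* bio_op() masks with REQ_OP_MASK to recover the bare operation. */
	if (bio_op(bio) == REQ_OP_WRITE && op_is_write(bio->bi_opf))
		pr_debug("write, flag bits 0x%x\n",
			 (__force unsigned int)(bio->bi_opf & ~REQ_OP_MASK));
}
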
buffer_head.h
    238: int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
    239: void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
    240: void submit_bh(blk_opf_t, struct buffer_head *);
    244: int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
    246: blk_opf_t op_flags, bool force_lock);
    429: static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags) in bh_readahead()
    439: static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags) in bh_read_nowait()
    446: static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags) in bh_read()
    459: blk_opf_t op_flags) in bh_readahead_batch()
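
The buffer_head.h prototypes take a blk_opf_t of extra flags layered on top of the operation each helper already implies (REQ_OP_READ for the bh_read*() family, REQ_OP_WRITE for the dirty-buffer helpers). A hedged sketch of typical usage; demo_read_block is a hypothetical name.

#include <linux/buffer_head.h>
#include <linux/blk_types.h>
#include <linux/errno.h>

/*
 * Hypothetical helper: read one metadata block synchronously and start
 * readahead on the next one.  The blk_opf_t argument carries extra flags
 * on top of the REQ_OP_READ that bh_read()/bh_readahead() imply.
 */
static int demo_read_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh, *next;

	bh = sb_getblk(sb, blocknr);
	if (!bh)
		return -ENOMEM;

	/* Synchronous read; a negative return means the I/O failed. */
	if (bh_read(bh, 0) < 0) {
		brelse(bh);
		return -EIO;
	}

	/* Fire-and-forget readahead, tagged with REQ_RAHEAD. */
	next = sb_getblk(sb, blocknr + 1);
	if (next) {
		bh_readahead(next, REQ_RAHEAD);
		brelse(next);
	}

	brelse(bh);
	return 0;
}
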
bio.h
    356: blk_opf_t opf, gfp_t gfp_mask,
    369: unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask) in bio_alloc()
    407: unsigned short max_vecs, blk_opf_t opf);
    409: void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
    691: unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
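
In bio.h the blk_opf_t is supplied at allocation time: bio_alloc(), bio_init() and bio_reset() all accept the opf up front. A rough sketch of a one-page write built this way; demo_submit_write and demo_end_io are hypothetical names and error handling is kept to a minimum.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Completion callback for the sketch below: just drop the bio reference. */
static void demo_end_io(struct bio *bio)
{
	/* bio->bi_status carries the result; a real caller would check it. */
	bio_put(bio);
}

/* Hypothetical helper: write one page at @sector on @bdev. */
static void demo_submit_write(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	/*
	 * Operation and flags are handed to bio_alloc() as one blk_opf_t.
	 * With a reclaim-capable gfp_mask the allocation is not expected to
	 * fail, so the NULL check is omitted in this sketch.
	 */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = demo_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);	/* completion arrives via demo_end_io() */
}
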
dm-io.h
    62: blk_opf_t bi_opf; /* Request type and flags */
writeback.h
    104: static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc) in wbc_to_write_flags()
    106: blk_opf_t flags = 0; in wbc_to_write_flags()
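
wbc_to_write_flags() turns writeback_control state into REQ_* bits (REQ_SYNC for WB_SYNC_ALL writeback, for instance), which filesystems commonly OR into REQ_OP_WRITE when building writeback bios. A small sketch under that assumption; demo_wb_bio is a hypothetical name.

#include <linux/writeback.h>
#include <linux/bio.h>

/*
 * Hypothetical helper: allocate a writeback bio whose flags follow the
 * writeback_control.
 */
static struct bio *demo_wb_bio(struct block_device *bdev,
			       struct writeback_control *wbc,
			       unsigned short nr_vecs)
{
	blk_opf_t opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	return bio_alloc(bdev, nr_vecs, opf, GFP_NOFS);
}
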
blktrace_api.h
    113: void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
blk-mq.h
    107: blk_opf_t cmd_flags; /* op and common flags */
    738: struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
    741: blk_opf_t opf, blk_mq_req_flags_t flags,
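
blk_mq_alloc_request() takes the blk_opf_t for the new request alongside a separate blk_mq_req_flags_t that steers the allocation itself (BLK_MQ_REQ_NOWAIT and friends). A hedged sketch of allocating and freeing a passthrough request; demo_alloc_pt_request is a hypothetical name and the payload setup is elided.

#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Hypothetical helper: allocate a driver-internal (passthrough) request.
 * The blk_opf_t selects the operation; the blk_mq_req_flags_t controls
 * allocation behaviour such as BLK_MQ_REQ_NOWAIT.
 */
static int demo_alloc_pt_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the driver-specific payload and execute it ... */

	blk_mq_free_request(rq);
	return 0;
}
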
/linux-6.14.4/drivers/md/dm-vdo/
io-submitter.h
    29: blk_opf_t operation, char *data);
    33: blk_opf_t operation) in vdo_submit_metadata_vio()
vio.h
    122: blk_opf_t bi_opf, physical_block_number_t pbn);
    125: blk_opf_t bi_opf, physical_block_number_t pbn);
/linux-6.14.4/drivers/block/rnbd/
rnbd-proto.h
    219: static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf) in rnbd_to_bio_flags()
    221: blk_opf_t bio_opf; in rnbd_to_bio_flags()
/linux-6.14.4/block/
bfq-iosched.h
    1068: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
    1069: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
    1071: u64 io_start_time_ns, blk_opf_t opf);
    1079: blk_opf_t opf);
blk-flush.c
    95: struct blk_flush_queue *fq, blk_opf_t flags);
    154: blk_opf_t cmd_flags; in blk_flush_complete_seq()
    276: blk_opf_t flags) in blk_kick_flush()
blk-mq.h
    87: static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf) in blk_mq_get_hctx_type()
    108: blk_opf_t opf, in blk_mq_map_queue()
    153: blk_opf_t cmd_flags;
bfq-cgroup.c
    223: blk_opf_t opf) in bfqg_stats_update_io_add()
    231: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) in bfqg_stats_update_io_remove()
    236: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) in bfqg_stats_update_io_merged()
    242: u64 io_start_time_ns, blk_opf_t opf) in bfqg_stats_update_completion()
    257: void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { } in bfqg_stats_update_io_remove()
    258: void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { } in bfqg_stats_update_io_merged()
    260: u64 io_start_time_ns, blk_opf_t opf) { } in bfqg_stats_update_completion()
fops.c
    28: static blk_opf_t dio_bio_write_op(struct kiocb *iocb) in dio_bio_write_op()
    30: blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; in dio_bio_write_op()
    175: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb); in __blkdev_direct_IO()
    321: blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb); in __blkdev_direct_IO_async()
blk-cgroup-rwstat.h
    62: blk_opf_t opf, uint64_t val) in blkg_rwstat_add()
blk-merge.c
    740: blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK; in blk_rq_set_mixed_merge()
    759: static inline blk_opf_t bio_failfast(const struct bio *bio) in bio_failfast()
    987: const blk_opf_t ff = bio_failfast(bio); in bio_attempt_back_merge()
    1016: const blk_opf_t ff = bio_failfast(bio); in bio_attempt_front_merge()
blk-wbt.c
    533: static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf) in get_limit()
    565: blk_opf_t opf;
    585: blk_opf_t opf) in __wbt_wait()
/linux-6.14.4/drivers/md/
dm-io.c
    306: static void do_region(const blk_opf_t opf, unsigned int region, in do_region()
    386: static void dispatch_io(blk_opf_t opf, unsigned int num_regions, in dispatch_io()
    413: struct dm_io_region *where, blk_opf_t opf, in async_io()
    446: struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, in sync_io()
/linux-6.14.4/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
    83: blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | in hp_sw_tur()
    132: blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV | in hp_sw_start_stop()
/linux-6.14.4/fs/nilfs2/
btnode.h
    38: blk_opf_t, struct buffer_head **, sector_t *);
/linux-6.14.4/fs/iomap/
direct-io.c
    63: struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf) in iomap_dio_alloc_bio()
    273: static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio, in iomap_dio_bio_opflags()
    276: blk_opf_t opflags = REQ_SYNC | REQ_IDLE; in iomap_dio_bio_opflags()
    301: blk_opf_t bio_opf; in iomap_dio_bio_iter()
/linux-6.14.4/fs/btrfs/
bio.h
    102: struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
/linux-6.14.4/fs/gfs2/
lops.h
    20: void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
log.h
    85: blk_opf_t op_flags);
