Lines matching the whole-word identifier `i` in lib/iov_iter.c (Linux kernel)
  79	 * @i: iterator
  90	size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
  92		if (iter_is_ubuf(i)) {
  93			size_t n = min(size, iov_iter_count(i));
  94			n -= fault_in_readable(i->ubuf + i->iov_offset, n);
  96		} else if (iter_is_iovec(i)) {
  97			size_t count = min(size, iov_iter_count(i));
 102			for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
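Callers typically pair this with a copy that must not fault: fault the user pages in first, copy with page faults disabled, and retry if the pages were reclaimed in between. A minimal sketch of that retry loop, modelled loosely on generic_perform_write(); the copy_one_chunk() helper is hypothetical:

	/*
	 * Sketch only: fault_in_iov_iter_readable() returns the number of
	 * bytes NOT faulted in, so a return equal to @bytes means nothing
	 * in the requested range is accessible.
	 */
	static ssize_t copy_with_prefault(struct iov_iter *i, size_t bytes)
	{
		size_t copied;

		do {
			if (fault_in_iov_iter_readable(i, bytes) == bytes)
				return -EFAULT;
			copied = copy_one_chunk(i, bytes);	/* hypothetical no-fault copy */
		} while (!copied);	/* pages reclaimed again: fault them back in */

		return copied;
	}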
 121	 * @i: iterator
 124	 * Faults in the iterator using get_user_pages(), i.e., without triggering
 125	 * hardware page faults.  This is primarily useful when we already know that
 126	 * some or all of the pages in @i aren't in memory.
 133	size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
 135		if (iter_is_ubuf(i)) {
 136			size_t n = min(size, iov_iter_count(i));
 137			n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
 139		} else if (iter_is_iovec(i)) {
 140			size_t count = min(size, iov_iter_count(i));
 145			for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
 162	void iov_iter_init(struct iov_iter *i, unsigned int direction,
 167		*i = (struct iov_iter) {
 179	size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 181		if (WARN_ON_ONCE(i->data_source))
 183		if (user_backed_iter(i))
 185		return iterate_and_advance(i, bytes, (void *)addr,
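For callers, the usual entry points are the copy_to_iter()/copy_from_iter() wrappers around these internals; the destination can be user memory, a kvec, a bvec, and so on without the caller caring which. A hedged sketch of pushing a kernel buffer out through an iterator:

	#include <linux/uio.h>

	/* Sketch: copy a kernel buffer into whatever @to describes. */
	static ssize_t push_to_iter(const void *kbuf, size_t len, struct iov_iter *to)
	{
		size_t copied = copy_to_iter(kbuf, len, to);	/* advances @to */

		if (copied != len)
			return -EFAULT;		/* short copy: fault in @to */
		return copied;
	}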
 214	 * @i: destination iterator
 234	size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 236		if (WARN_ON_ONCE(i->data_source))
 238		if (user_backed_iter(i))
 240		return iterate_and_advance(i, bytes, (void *)addr,
 247	size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 249		return iterate_and_advance(i, bytes, addr,
 253	size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 255		if (WARN_ON_ONCE(!i->data_source))
 258		if (user_backed_iter(i))
 260		return __copy_from_iter(addr, bytes, i);
 271	size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 273		if (WARN_ON_ONCE(!i->data_source))
 276		return iterate_and_advance(i, bytes, addr,
 302	 * @i: source iterator
 314	size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 316		if (WARN_ON_ONCE(!i->data_source))
 319		return iterate_and_advance(i, bytes, addr,
 350			struct iov_iter *i)		/* in copy_page_to_iter() */
 355		if (WARN_ON_ONCE(i->data_source))
 362			n = _copy_to_iter(kaddr + offset, n, i);
 379			struct iov_iter *i)		/* in copy_page_to_iter_nofault() */
 385		if (WARN_ON_ONCE(i->data_source))
 393			n = iterate_and_advance(i, n, kaddr + offset,
 412			struct iov_iter *i)		/* in copy_page_from_iter() */
 422			n = _copy_from_iter(kaddr + offset, n, i);
 453	size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 455		return iterate_and_advance(i, bytes, NULL,
 461			size_t bytes, struct iov_iter *i)	/* in copy_page_from_iter_atomic() */
 469		if (WARN_ON_ONCE(!i->data_source))
 483			n = __copy_from_iter(p, n, i);
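copy_page_to_iter() is the building block for page-to-iterator copies, mapping highmem pages internally as needed. A minimal sketch of a read path emitting one pagecache page:

	/* Sketch: copy @len bytes at @offset within @page out to @to. */
	static ssize_t emit_page(struct page *page, size_t offset, size_t len,
				 struct iov_iter *to)
	{
		size_t copied = copy_page_to_iter(page, offset, len, to);

		return copied ? copied : -EFAULT;
	}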
 493	static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
 497		if (!i->count)
 499		i->count -= size;
 501		size += i->iov_offset;
 503		for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
 508		i->iov_offset = size;
 509		i->nr_segs -= bvec - i->bvec;
 510		i->bvec = bvec;
 513	static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
 517		if (!i->count)
 519		i->count -= size;
 521		size += i->iov_offset; // from beginning of current segment
 522		for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
 527		i->iov_offset = size;
 528		i->nr_segs -= iov - iter_iov(i);
 529		i->__iov = iov;
 532	static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
 534		const struct folio_queue *folioq = i->folioq;
 535		unsigned int slot = i->folioq_slot;
 537		if (!i->count)
 539		i->count -= size;
 546		size += i->iov_offset; /* From beginning of current segment. */
 560		i->iov_offset = size;
 561		i->folioq_slot = slot;
 562		i->folioq = folioq;
 565	void iov_iter_advance(struct iov_iter *i, size_t size)
 567		if (unlikely(i->count < size))
 568			size = i->count;
 569		if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
 570			i->iov_offset += size;
 571			i->count -= size;
 572		} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
 574			iov_iter_iovec_advance(i, size);
 575		} else if (iov_iter_is_bvec(i)) {
 576			iov_iter_bvec_advance(i, size);
 577		} else if (iov_iter_is_folioq(i)) {
 578			iov_iter_folioq_advance(i, size);
 579		} else if (iov_iter_is_discard(i)) {
 580			i->count -= size;
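iov_iter_advance() is how a caller consumes bytes that were moved by some out-of-band means (DMA, a prior partial copy) so the iterator stays in sync with the data stream; as the dispatch above shows, it walks segment boundaries for iovec/bvec/folioq flavours and clamps to the remaining count. A minimal sketch, with hw_transfer() hypothetical:

	static int sync_after_hw(struct iov_iter *i)
	{
		ssize_t moved = hw_transfer(i);	/* hypothetical: moves data, not the iterator */

		if (moved < 0)
			return moved;
		iov_iter_advance(i, moved);	/* consume what the hardware moved */
		return 0;
	}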
 585	static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
 587		const struct folio_queue *folioq = i->folioq;
 588		unsigned int slot = i->folioq_slot;
 601				i->iov_offset = fsize - unroll;
 607		i->folioq_slot = slot;
 608		i->folioq = folioq;
 611	void iov_iter_revert(struct iov_iter *i, size_t unroll)
 617		i->count += unroll;
 618		if (unlikely(iov_iter_is_discard(i)))
 620		if (unroll <= i->iov_offset) {
 621			i->iov_offset -= unroll;
 624		unroll -= i->iov_offset;
 625		if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
 630		} else if (iov_iter_is_bvec(i)) {
 631			const struct bio_vec *bvec = i->bvec;
 634				i->nr_segs++;
 636			i->bvec = bvec;
 637			i->iov_offset = n - unroll;
 642		} else if (iov_iter_is_folioq(i)) {
 643			i->iov_offset = 0;
 644			iov_iter_folioq_revert(i, unroll);
 646			const struct iovec *iov = iter_iov(i);
 649			i->nr_segs++;
 651			i->__iov = iov;
 652			i->iov_offset = n - unroll;
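The usual revert idiom measures consumption by the drop in iov_iter_count() and walks the iterator back on failure, as networking callers do. A minimal sketch; send_op() is hypothetical:

	static int send_all_or_nothing(struct iov_iter *from)
	{
		size_t before = iov_iter_count(from);
		int ret = send_op(from);	/* hypothetical: may partially consume @from */

		if (ret < 0)	/* undo whatever was consumed */
			iov_iter_revert(from, before - iov_iter_count(from));
		return ret;
	}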
 664	size_t iov_iter_single_seg_count(const struct iov_iter *i)
 666		if (i->nr_segs > 1) {
 667			if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
 668				return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
 669			if (iov_iter_is_bvec(i))
 670				return min(i->count, i->bvec->bv_len - i->iov_offset);
 672		if (unlikely(iov_iter_is_folioq(i)))
 673			return !i->count ? 0 :
 674				umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
 675		return i->count;
 679	void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
 684		*i = (struct iov_iter){
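iov_iter_kvec() wraps kernel-virtual buffers, so a plain kernel buffer can be fed to any interface that only takes iterators. A minimal sketch:

	#include <linux/uio.h>

	static void iter_over_kbuf(void *kbuf, size_t len, struct kvec *kv,
				   struct iov_iter *iter)
	{
		kv->iov_base = kbuf;
		kv->iov_len  = len;
		/* ITER_SOURCE: data will be read out of @kbuf. */
		iov_iter_kvec(iter, ITER_SOURCE, kv, 1, len);
	}

The kvec array must stay alive as long as the iterator is in use; the iterator only points at it.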
 695	void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
 700		*i = (struct iov_iter){
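The bvec flavour points the iterator at pages rather than virtual addresses, which is what block-layer and splice paths use. A hedged sketch wrapping a single page:

	#include <linux/bvec.h>
	#include <linux/uio.h>

	static void iter_over_page(struct page *page, unsigned int off,
				   unsigned int len, struct bio_vec *bv,
				   struct iov_iter *iter)
	{
		bvec_set_page(bv, page, len, off);
		/* ITER_DEST: data will be written into the page. */
		iov_iter_bvec(iter, ITER_DEST, bv, 1, len);
	}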
 712	 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
 713	 * @i: The iterator to initialise.
 718	 * @count: The size of the I/O buffer in bytes.
 720	 * Set up an I/O iterator to either draw data out of the folios in the
 721	 * queue or to inject data into them.
 725	void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
 730		*i = (struct iov_iter) {
 742	 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 743	 * @i: The iterator to initialise.
 747	 * @count: The size of the I/O buffer in bytes.
 749	 * Set up an I/O iterator to either draw data out of the pages attached to an
 750	 * inode or to inject data into those pages.
 754	void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
 758		*i = (struct iov_iter) {
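This is how netfs-style code addresses the pagecache directly: point the iterator at mapping->i_pages for a byte range. Minimal sketch; the caller is responsible for holding references or locks on the cached pages:

	#include <linux/pagemap.h>
	#include <linux/uio.h>

	static void iter_over_pagecache(struct address_space *mapping,
					loff_t pos, size_t len,
					struct iov_iter *iter)
	{
		/* Pages in the range must already be pinned/locked. */
		iov_iter_xarray(iter, ITER_DEST, &mapping->i_pages, pos, len);
	}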
 770	 * iov_iter_discard - Initialise an I/O iterator that discards data
 771	 * @i: The iterator to initialise.
 773	 * @count: The size of the I/O buffer in bytes.
 775	 * Set up an I/O iterator that just discards everything that's written to it.
 778	void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
 781		*i = (struct iov_iter){
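A discard iterator is handy when data must be consumed but not kept, e.g. skipping over part of a datagram. Hedged sketch:

	/* Sketch: throw away @len bytes produced by some source. */
	static size_t drain_bytes(const void *src, size_t len)
	{
		struct iov_iter sink;

		iov_iter_discard(&sink, ITER_DEST, len);
		/* The copy "succeeds" but the bytes go nowhere. */
		return copy_to_iter(src, len, &sink);
	}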
 790	static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
 793		const struct iovec *iov = iter_iov(i);
 794		size_t size = i->count;
 795		size_t skip = i->iov_offset;
 815	static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
 818		const struct bio_vec *bvec = i->bvec;
 819		unsigned skip = i->iov_offset;
 820		size_t size = i->count;
 844	 * @i: &struct iov_iter to check
 850	bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
 853		if (likely(iter_is_ubuf(i))) {
 854			if (i->count & len_mask)
 856			if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
 861		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
 862			return iov_iter_aligned_iovec(i, addr_mask, len_mask);
 864		if (iov_iter_is_bvec(i))
 865			return iov_iter_aligned_bvec(i, addr_mask, len_mask);
 868		if (iov_iter_is_xarray(i)) {
 869			if (i->count & len_mask)
 871			if ((i->xarray_start + i->iov_offset) & addr_mask)
 874		if (iov_iter_is_folioq(i)) {
 875			if (i->count & len_mask)
 877			if (i->iov_offset & addr_mask)
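Both masks name the bits that must be zero, so a power-of-two alignment of N bytes is expressed as N - 1. This mirrors how direct-I/O paths validate an iterator against a device's logical block size; bdev_logical_block_size() is real, the surrounding function is a sketch:

	#include <linux/blkdev.h>

	/* Sketch: reject a DIO request with misaligned addresses or lengths. */
	static int check_dio_alignment(struct block_device *bdev,
				       const struct iov_iter *iter)
	{
		unsigned int mask = bdev_logical_block_size(bdev) - 1;

		if (!iov_iter_is_aligned(iter, mask, mask))
			return -EINVAL;
		return 0;
	}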
 885	static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
 887		const struct iovec *iov = iter_iov(i);
 889		size_t size = i->count;
 890		size_t skip = i->iov_offset;
 907	static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
 909		const struct bio_vec *bvec = i->bvec;
 911		size_t size = i->count;
 912		unsigned skip = i->iov_offset;
 928	unsigned long iov_iter_alignment(const struct iov_iter *i)
 930		if (likely(iter_is_ubuf(i))) {
 931			size_t size = i->count;
 933			return ((unsigned long)i->ubuf + i->iov_offset) | size;
 938		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
 939			return iov_iter_alignment_iovec(i);
 941		if (iov_iter_is_bvec(i))
 942			return iov_iter_alignment_bvec(i);
 945		if (iov_iter_is_folioq(i))
 946			return i->iov_offset | i->count;
 947		if (iov_iter_is_xarray(i))
 948			return (i->xarray_start + i->iov_offset) | i->count;
 954	unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 958		size_t size = i->count;
 961		if (iter_is_ubuf(i))
 964		if (WARN_ON(!iter_is_iovec(i)))
 967		for (k = 0; k < i->nr_segs; k++) {
 968			const struct iovec *iov = iter_iov(i) + k;
1085	static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1093		pos = i->xarray_start + i->iov_offset;
1101		nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1106		i->iov_offset += maxsize;
1107		i->count -= maxsize;
1112	static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1117		if (iter_is_ubuf(i))
1118			return (unsigned long)i->ubuf + i->iov_offset;
1120		for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1121			const struct iovec *iov = iter_iov(i) + k;
1134	static struct page *first_bvec_segment(const struct iov_iter *i,
1138		size_t skip = i->iov_offset, len;
1140		len = i->bvec->bv_len - skip;
1143		skip += i->bvec->bv_offset;
1144		page = i->bvec->bv_page + skip / PAGE_SIZE;
1149	static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1155		if (maxsize > i->count)
1156			maxsize = i->count;
1162		if (likely(user_backed_iter(i))) {
1166			if (iov_iter_rw(i) != WRITE)
1168			if (i->nofault)
1171			addr = first_iovec_segment(i, &maxsize);
1181			iov_iter_advance(i, maxsize);
1184		if (iov_iter_is_bvec(i)) {
1188			page = first_bvec_segment(i, &maxsize, start);
1200			i->count -= maxsize;
1201			i->iov_offset += maxsize;
1202			if (i->iov_offset == i->bvec->bv_len) {
1203				i->iov_offset = 0;
1204				i->bvec++;
1205				i->nr_segs--;
1209		if (iov_iter_is_folioq(i))
1210			return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
1211		if (iov_iter_is_xarray(i))
1212			return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1216	ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1223		return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
1227	ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1234		len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
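Both entry points take page references and advance the iterator; the *_alloc2 variant also allocates the page array, which the caller frees with kvfree() after dropping each reference with put_page(). Hedged sketch:

	static ssize_t grab_user_pages(struct iov_iter *i, size_t maxsize)
	{
		struct page **pages;
		size_t offset, npages, j;
		ssize_t n;

		n = iov_iter_get_pages_alloc2(i, &pages, maxsize, &offset);
		if (n <= 0)
			return n;	/* nothing mapped, or an error */

		/* ... hand (pages, offset, n bytes) to the device ... */

		npages = DIV_ROUND_UP(offset + n, PAGE_SIZE);
		for (j = 0; j < npages; j++)
			put_page(pages[j]);
		kvfree(pages);
		return n;
	}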
1243	static int iov_npages(const struct iov_iter *i, int maxpages)
1245		size_t skip = i->iov_offset, size = i->count;
1249		for (p = iter_iov(i); size; skip = 0, p++) {
1263	static int bvec_npages(const struct iov_iter *i, int maxpages)
1265		size_t skip = i->iov_offset, size = i->count;
1269		for (p = i->bvec; size; skip = 0, p++) {
1281	int iov_iter_npages(const struct iov_iter *i, int maxpages)
1283		if (unlikely(!i->count))
1285		if (likely(iter_is_ubuf(i))) {
1286			unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1287			int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1291		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1292			return iov_npages(i, maxpages);
1293		if (iov_iter_is_bvec(i))
1294			return bvec_npages(i, maxpages);
1295		if (iov_iter_is_folioq(i)) {
1296			unsigned offset = i->iov_offset % PAGE_SIZE;
1297			int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1300		if (iov_iter_is_xarray(i)) {
1301			unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1302			int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
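The result is capped at maxpages, which makes it a natural sizing hint for per-request allocations, e.g. choosing a bio's vector count in direct-I/O paths. Sketch under that assumption:

	#include <linux/bio.h>

	/* Sketch: size a bio's vector table from the iterator. */
	static struct bio *alloc_bio_for_iter(struct block_device *bdev,
					      struct iov_iter *iter)
	{
		unsigned short nr = iov_iter_npages(iter, BIO_MAX_VECS);

		return bio_alloc(bdev, nr, REQ_OP_WRITE, GFP_KERNEL);
	}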
1331		u32 i;		/* local in copy_compat_iovec_from_user() */
1336		for (i = 0; i < nr_segs; i++) {
1340			unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1341			unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1348			iov[i].iov_base = compat_ptr(buf);
1349			iov[i].iov_len = len;
1429			struct iovec **iovp, struct iov_iter *i,	/* in __import_iovec_ubuf() */
1444		ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
1447		return i->count;
1452			struct iov_iter *i, bool compat)	/* in __import_iovec() */
1459			return __import_iovec_ubuf(type, uvec, iovp, i, compat);
1492		iov_iter_init(i, type, iov, nr_segs, total_len);
1511	 * @i: Pointer to iterator that will be initialized on success.
1524			struct iovec **iovp, struct iov_iter *i)	/* in import_iovec() */
1526		return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1531	int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
1538		iov_iter_ubuf(i, rw, buf, len);
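import_ubuf() is the single-buffer fast path used by read()/write()-style entry points; import_iovec() does the same for readv()/writev() vectors, copying them in from userspace first. Minimal sketch of the single-buffer case; consume_iter() is hypothetical:

	#include <linux/uio.h>

	/* Sketch: a write() path turning (buf, len) into an iterator. */
	static ssize_t my_write(struct file *file, const char __user *buf,
				size_t len)
	{
		struct iov_iter iter;
		int ret;

		ret = import_ubuf(ITER_SOURCE, (void __user *)buf, len, &iter);
		if (unlikely(ret))
			return ret;	/* e.g. the range fails access checks */
		return consume_iter(file, &iter);	/* hypothetical consumer */
	}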
1547	 * @i: &struct iov_iter to restore
1550	 * Used after iov_iter_save_state() to restore @i, if operations may
1555	void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1557		if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1558				 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
1560		i->iov_offset = state->iov_offset;
1561		i->count = state->count;
1562		if (iter_is_ubuf(i))
1574		if (iov_iter_is_bvec(i))
1575			i->bvec -= state->nr_segs - i->nr_segs;
1577			i->__iov -= state->nr_segs - i->nr_segs;
1578		i->nr_segs = state->nr_segs;
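The matching save side is iov_iter_save_state(); together they implement the replay idiom used when a request must be retried after the iterator has already been advanced. Sketch, with submit_once() hypothetical:

	static ssize_t submit_with_retry(struct iov_iter *i)
	{
		struct iov_iter_state state;
		ssize_t ret;

		iov_iter_save_state(i, &state);
		ret = submit_once(i);		/* hypothetical: may advance @i */
		if (ret == -EAGAIN) {
			iov_iter_restore(i, &state);	/* rewind for the retry */
			ret = submit_once(i);
		}
		return ret;
	}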
1585	static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
1591		const struct folio_queue *folioq = i->folioq;
1594		size_t extracted = 0, offset, slot = i->folioq_slot;
1599		if (WARN_ON(i->iov_offset != 0))
1603		offset = i->iov_offset & ~PAGE_MASK;
1613			size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
1618			i->count -= part;
1619			i->iov_offset += part;
1628			if (i->iov_offset >= fsize) {
1629				i->iov_offset = 0;
1638		i->folioq = folioq;
1639		i->folioq_slot = slot;
1647	static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
1655		loff_t pos = i->xarray_start + i->iov_offset;
1657		XA_STATE(xas, i->xarray, index);
1685		iov_iter_advance(i, maxsize);
1693	static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
1699		size_t skip = i->iov_offset, size = 0;
1703		if (i->nr_segs == 0)
1706		if (i->iov_offset == i->bvec->bv_len) {
1707			i->iov_offset = 0;
1708			i->nr_segs--;
1709			i->bvec++;
1718		while (bi.bi_size && bi.bi_idx < i->nr_segs) {
1719			struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
1749			bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
1752		iov_iter_advance(i, size);
1760	static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
1768		size_t skip = i->iov_offset, offset, len, size;
1772		if (i->nr_segs == 0)
1774		size = min(maxsize, i->kvec->iov_len - skip);
1777			i->iov_offset = 0;
1778			i->nr_segs--;
1779			i->kvec++;
1783		kaddr = i->kvec->iov_base + skip;
1808		iov_iter_advance(i, size);
1824	static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
1836		if (i->data_source == ITER_DEST)
1840		if (i->nofault)
1843		addr = first_iovec_segment(i, &maxsize);
1853		iov_iter_advance(i, maxsize);
1859	 * @i: The iterator to extract from
1900	ssize_t iov_iter_extract_pages(struct iov_iter *i,
1907		maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
1911		if (likely(user_backed_iter(i)))
1912			return iov_iter_extract_user_pages(i, pages, maxsize,
1915		if (iov_iter_is_kvec(i))
1916			return iov_iter_extract_kvec_pages(i, pages, maxsize,
1919		if (iov_iter_is_bvec(i))
1920			return iov_iter_extract_bvec_pages(i, pages, maxsize,
1923		if (iov_iter_is_folioq(i))
1924			return iov_iter_extract_folioq_pages(i, pages, maxsize,
1927		if (iov_iter_is_xarray(i))
1928			return iov_iter_extract_xarray_pages(i, pages, maxsize,
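Unlike iov_iter_get_pages2(), extraction only pins user-backed iterators; kernel-backed flavours yield bare page pointers with no reference taken. Callers therefore consult iov_iter_extract_will_pin() when releasing. Hedged sketch of the release side:

	static void release_extracted(struct iov_iter *i, struct page **pages,
				      unsigned int npages)
	{
		unsigned int j;

		if (!iov_iter_extract_will_pin(i))
			return;		/* kvec/bvec/folioq/xarray: nothing was pinned */
		for (j = 0; j < npages; j++)
			unpin_user_page(pages[j]);
	}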