// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * (a large amount of code was adapted from Linux kernel. )
 *
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <[email protected]>
 * Modified by Huang Jianan <[email protected]>
 */
#include "erofs/internal.h"
#include "erofs/print.h"

static int z_erofs_do_map_blocks(struct erofs_inode *vi,
				 struct erofs_map_blocks *map,
				 int flags);

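/*
 * Initialize the zmap state that needs no on-disk z_erofs_map_header:
 * when none of the big pcluster, ztailpacking and fragment features are
 * in use and the inode uses full compressed indexes, the defaults below
 * are sufficient and the inode is marked EROFS_I_Z_INITED right away.
 */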
int z_erofs_fill_inode(struct erofs_inode *vi)
{
	struct erofs_sb_info *sbi = vi->sbi;

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) &&
	    vi->datalayout == EROFS_INODE_COMPRESSED_FULL) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = sbi->blkszbits;

		vi->flags |= EROFS_I_Z_INITED;
	}
	return 0;
}

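/*
 * Parse the on-disk z_erofs_map_header on first use and cache the result
 * in the in-memory inode; EROFS_I_Z_INITED prevents re-parsing.  For
 * ztailpacking and fragment inodes, a FINDTAIL mapping pass is also run
 * here to locate the tail extent.
 */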
static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
{
	int ret;
	erofs_off_t pos;
	struct z_erofs_map_header *h;
	char buf[sizeof(struct z_erofs_map_header)];
	struct erofs_sb_info *sbi = vi->sbi;

	if (vi->flags & EROFS_I_Z_INITED)
		return 0;

	pos = round_up(erofs_iloc(vi) + vi->inode_isize + vi->xattr_isize, 8);
	ret = erofs_dev_read(sbi, 0, buf, pos, sizeof(buf));
	if (ret < 0)
		return -EIO;

	h = (struct z_erofs_map_header *)buf;
	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The remaining bits keep z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto out;
	}

	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err("unknown compression format %u for nid %llu",
			  vi->z_algorithmtype[0], (unsigned long long)vi->nid);
		return -EOPNOTSUPP;
	}

	vi->z_logical_clusterbits = sbi->blkszbits + (h->h_clusterbits & 7);
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid * 1ULL);
		return -EFSCORRUPTED;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		vi->idata_size = le16_to_cpu(h->h_idata_size);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (!map.m_plen ||
		    erofs_blkoff(sbi, map.m_pa) + map.m_plen > erofs_blksiz(sbi)) {
			erofs_err("invalid tail-packing pclustersize %llu",
				  map.m_plen | 0ULL);
			return -EFSCORRUPTED;
		}
		if (ret < 0)
			return ret;
	}
	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = { .index = UINT_MAX };

		vi->fragmentoff = le32_to_cpu(h->h_fragmentoff);
		ret = z_erofs_do_map_blocks(vi, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (ret < 0)
			return ret;
	}
out:
	vi->flags |= EROFS_I_Z_INITED;
	return 0;
}

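/*
 * Per-lookup state: the current lcluster number plus the fields decoded
 * from its index (type, clusterofs, deltas) and, once resolved, the
 * physical block address and compressed block count of the pcluster.
 */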
struct z_erofs_maprecorder {
	struct erofs_inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedblks;
	erofs_off_t nextpackoff;
	bool partialref;
};

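/*
 * Make sure the metadata block containing eblk is available in map->mpage;
 * skip the read when that block is already cached (map->index == eblk).
 */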
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	int ret;
	struct erofs_map_blocks *const map = m->map;
	char *mpage = map->mpage;

	if (map->index == eblk)
		return 0;

	ret = erofs_blk_read(m->inode->sbi, 0, mpage, eblk, 1);
	if (ret < 0)
		return -EIO;

	map->index = eblk;

	return 0;
}

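/*
 * Decode one full (non-compacted) lcluster index for lcn.  NONHEAD entries
 * carry lookback/lookahead distances in delta[] (or the pcluster block
 * count when Z_EROFS_LI_D0_CBLKCNT is set in delta[0]); PLAIN/HEAD1
 * entries carry clusterofs and the physical block address.
 */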
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	const erofs_off_t ibase = erofs_iloc(vi);
	const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(ibase +
			vi->inode_isize + vi->xattr_isize) +
			lcn * sizeof(struct z_erofs_lcluster_index);
	struct z_erofs_lcluster_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(sbi, pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(sbi, pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
			if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = m->delta[0] &
				~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (advise & Z_EROFS_LI_PARTIAL_REF)
			m->partialref = true;
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

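/*
 * Pull one compacted entry out of the bit stream starting at bit "pos":
 * the low "lobits" bits hold the lo field and the next 2 bits the lcluster
 * type.  A little-endian 32-bit window is loaded from the byte containing
 * "pos" and shifted so the entry starts at bit 0; callers pass
 * pos == encodebits * i, e.g. 14 * i when each entry is 14 bits wide.
 */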
static unsigned int decode_compactedbits(unsigned int lobits,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & ((1 << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}

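/*
 * Compute the lookahead distance delta[1] for a NONHEAD entry by scanning
 * forward inside the pack; if every remaining entry is NONHEAD, the last
 * one's lo field (which stores delta[1]) supplies the rest of the distance.
 */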
static int get_compacted_la_distance(unsigned int lobits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lobits, in, encodebits * i, &type);

		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

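/*
 * Decode the entry for m->lcn from its compacted pack.  A pack occupies
 * (vcnt << amortizedshift) bytes: vcnt bit-packed entries of encodebits
 * bits each, followed by a trailing __le32 base block address used to
 * derive pblk.  For instance, a 2-byte-amortized pack holds vcnt == 16
 * entries of (32 - 4) * 8 / 16 == 14 bits each.
 */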
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4 && lclusterbits <= 14)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits <= 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/* note: this is not equivalent to round_up(pos, ..) */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(vi->sbi, pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lobits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_LI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * the last lcluster in the pack is special: its lo stores
		 * delta[1] rather than delta[0].  Hence, derive delta[0]
		 * from the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lobits, in,
					  encodebits * (i - 1), &type);
		if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_LI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				if (lo <= 1) {
					DBG_BUGON(1);
					/* --i; ++nblk; continue; */
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

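/*
 * Locate the compacted pack holding the requested lcluster.  Compacted
 * indexes follow the map header as: up to seven 4-byte entries to reach
 * 32-byte alignment, then (if COMPACTED_2B is advised) 2-byte entries in
 * multiples of 16, then 4-byte entries for the remainder.
 */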
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	const erofs_off_t ebase = round_up(erofs_iloc(vi) + vi->inode_isize +
					   vi->xattr_isize, 8) +
				  sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = BLK_ROUND_UP(sbi, vi->i_size);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(sbi, pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

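/* dispatch to the full (legacy) or compacted index decoder by datalayout */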
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = m->inode->datalayout;

	if (datamode == EROFS_INODE_COMPRESSED_FULL)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

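/*
 * Step backwards by lookback_distance lclusters, following NONHEAD
 * delta[0] values recursively, until the HEAD (or PLAIN) lcluster that
 * starts the current extent is reached; record its logical start in
 * map->m_la.
 */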
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err("bogus lookback distance @ nid %llu",
			  (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err("invalid lookback distance 0 @ nid %llu",
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err("unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, (unsigned long long)vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

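/*
 * Work out map->m_plen, the compressed (on-disk) byte length of the
 * pcluster.  For PLAIN extents or when big pclusters are disabled it is
 * exactly one lcluster; otherwise the block count is taken from the
 * CBLKCNT stored in the first NONHEAD lcluster after the head (or is one
 * lcluster if that slot is another HEAD/PLAIN lcluster).
 */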
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_sb_info *sbi = vi->sbi;
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1);

	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}

	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without a valid compressedblks, it at least mustn't be a CBLKCNT
	 * lcluster; otherwise an internal implementation error has been
	 * detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sbi->blkszbits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		/* fallthrough */
	default:
		erofs_err("cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid | 0ULL);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedblks << sbi->blkszbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid | 0ULL);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

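/*
 * Extend map->m_llen up to the next HEAD lcluster (or EOF) by following
 * the delta[1] lookahead distances, so that FIEMAP-style callers see the
 * full decompressed length of the extent.
 */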
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct erofs_inode *const vi = m->inode;
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= vi->i_size) {
			map->m_llen = vi->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err("unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn | 0ULL,
				  (unsigned long long)vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

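/*
 * Core mapping routine: load the lcluster covering the requested offset
 * (or the last lcluster when EROFS_GET_BLOCKS_FINDTAIL is set), look back
 * to its HEAD lcluster, then fill in the logical/physical extent and the
 * compression format.  Inline (ztailpacking) and fragment tail extents are
 * reported as EROFS_MAP_META and EROFS_MAP_FRAGMENT respectively.
 */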
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_sb_info *sbi = vi->sbi;
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = vi,
		.map = map,
		.kaddr = map->mpage,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? vi->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;
	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > vi->i_size)
				end = vi->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err("invalid logical cluster 0 at nid %llu",
				  (unsigned long long)vi->nid);
			err = -EFSCORRUPTED;
			goto out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto out;
		break;
	default:
		erofs_err("unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, (unsigned long long)vi->nid);
		err = -EOPNOTSUPP;
		goto out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;
	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(sbi, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto out;
		}
		if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_INTERLACED;
		else
			map->m_algorithmformat =
				Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		map->m_algorithmformat = vi->z_algorithmtype[0];
	}

	if (flags & EROFS_GET_BLOCKS_FIEMAP) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

out:
	erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
		  map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);
	return err;
}

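/*
 * Public entry point: reads beyond EOF stay unmapped, fully-fragment files
 * (tail extent starting at lcluster 0) are mapped as one fragment, and
 * everything else is handled by z_erofs_do_map_blocks().
 */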
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
			    struct erofs_map_blocks *map,
			    int flags)
{
	int err = 0;

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= vi->i_size) {
		map->m_llen = map->m_la + 1 - vi->i_size;
		map->m_la = vi->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(vi);
	if (err)
		goto out;

	if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
	    !vi->z_tailextent_headlcn) {
		map->m_la = 0;
		map->m_llen = vi->i_size;
		map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
			       EROFS_MAP_FRAGMENT;
		goto out;
	}

	err = z_erofs_do_map_blocks(vi, map, flags);
out:
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}