xref: /aosp_15_r20/external/erofs-utils/lib/inode.c (revision 33b1fccf6a0fada2c2875d400ed01119b7676ee5)
1 // SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
2 /*
3  * Copyright (C) 2018-2019 HUAWEI, Inc.
4  *             http://www.huawei.com/
5  * Created by Li Guifu <[email protected]>
6  * with heavy changes by Gao Xiang <[email protected]>
7  */
8 #define _GNU_SOURCE
9 #include <string.h>
10 #include <stdlib.h>
11 #include <stdio.h>
12 #include <sys/stat.h>
13 #include <config.h>
14 #if defined(HAVE_SYS_SYSMACROS_H)
15 #include <sys/sysmacros.h>
16 #endif
17 #include <dirent.h>
18 #include "erofs/print.h"
19 #include "erofs/diskbuf.h"
20 #include "erofs/inode.h"
21 #include "erofs/cache.h"
22 #include "erofs/compress.h"
23 #include "erofs/xattr.h"
24 #include "erofs/exclude.h"
25 #include "erofs/block_list.h"
26 #include "erofs/compress_hints.h"
27 #include "erofs/blobchunk.h"
28 #include "erofs/fragments.h"
29 #include "liberofs_private.h"
30 
31 #define S_SHIFT                 12
32 static unsigned char erofs_ftype_by_mode[S_IFMT >> S_SHIFT] = {
33 	[S_IFREG >> S_SHIFT]  = EROFS_FT_REG_FILE,
34 	[S_IFDIR >> S_SHIFT]  = EROFS_FT_DIR,
35 	[S_IFCHR >> S_SHIFT]  = EROFS_FT_CHRDEV,
36 	[S_IFBLK >> S_SHIFT]  = EROFS_FT_BLKDEV,
37 	[S_IFIFO >> S_SHIFT]  = EROFS_FT_FIFO,
38 	[S_IFSOCK >> S_SHIFT] = EROFS_FT_SOCK,
39 	[S_IFLNK >> S_SHIFT]  = EROFS_FT_SYMLINK,
40 };
41 
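/*
 * S_IFMT occupies bits 12..15 of st_mode, so (mode & S_IFMT) >> S_SHIFT
 * yields a small table index; e.g. S_IFREG (0x8000) >> 12 == 8, which
 * maps to EROFS_FT_REG_FILE above.
 */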
42 unsigned char erofs_mode_to_ftype(umode_t mode)
43 {
44 	return erofs_ftype_by_mode[(mode & S_IFMT) >> S_SHIFT];
45 }
46 
47 static const unsigned char erofs_dtype_by_ftype[EROFS_FT_MAX] = {
48 	[EROFS_FT_UNKNOWN]	= DT_UNKNOWN,
49 	[EROFS_FT_REG_FILE]	= DT_REG,
50 	[EROFS_FT_DIR]		= DT_DIR,
51 	[EROFS_FT_CHRDEV]	= DT_CHR,
52 	[EROFS_FT_BLKDEV]	= DT_BLK,
53 	[EROFS_FT_FIFO]		= DT_FIFO,
54 	[EROFS_FT_SOCK]		= DT_SOCK,
55 	[EROFS_FT_SYMLINK]	= DT_LNK
56 };
57 
58 static const umode_t erofs_dtype_by_umode[EROFS_FT_MAX] = {
59 	[EROFS_FT_UNKNOWN]	= S_IFMT,
60 	[EROFS_FT_REG_FILE]	= S_IFREG,
61 	[EROFS_FT_DIR]		= S_IFDIR,
62 	[EROFS_FT_CHRDEV]	= S_IFCHR,
63 	[EROFS_FT_BLKDEV]	= S_IFBLK,
64 	[EROFS_FT_FIFO]		= S_IFIFO,
65 	[EROFS_FT_SOCK]		= S_IFSOCK,
66 	[EROFS_FT_SYMLINK]	= S_IFLNK
67 };
68 
69 umode_t erofs_ftype_to_mode(unsigned int ftype, unsigned int perm)
70 {
71 	if (ftype >= EROFS_FT_MAX)
72 		ftype = EROFS_FT_UNKNOWN;
73 
74 	return erofs_dtype_by_umode[ftype] | perm;
75 }
76 
77 unsigned char erofs_ftype_to_dtype(unsigned int filetype)
78 {
79 	if (filetype >= EROFS_FT_MAX)
80 		return DT_UNKNOWN;
81 
82 	return erofs_dtype_by_ftype[filetype];
83 }
84 
85 #define NR_INODE_HASHTABLE	16384
86 
87 struct list_head inode_hashtable[NR_INODE_HASHTABLE];
88 
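/*
 * Source inodes are kept in a small hash table keyed by the original
 * (st_ino, st_dev) pair, so that hard links discovered while scanning
 * the source tree resolve to the same in-memory erofs_inode.
 */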
89 void erofs_inode_manager_init(void)
90 {
91 	unsigned int i;
92 
93 	for (i = 0; i < NR_INODE_HASHTABLE; ++i)
94 		init_list_head(&inode_hashtable[i]);
95 }
96 
97 void erofs_insert_ihash(struct erofs_inode *inode)
98 {
99 	unsigned int nr = (inode->i_ino[1] ^ inode->dev) % NR_INODE_HASHTABLE;
100 
101 	list_add(&inode->i_hash, &inode_hashtable[nr]);
102 }
103 
104 /* get the inode from the (source) inode # */
105 struct erofs_inode *erofs_iget(dev_t dev, ino_t ino)
106 {
107 	struct list_head *head =
108 		&inode_hashtable[(ino ^ dev) % NR_INODE_HASHTABLE];
109 	struct erofs_inode *inode;
110 
111 	list_for_each_entry(inode, head, i_hash)
112 		if (inode->i_ino[1] == ino && inode->dev == dev)
113 			return erofs_igrab(inode);
114 	return NULL;
115 }
116 
117 struct erofs_inode *erofs_iget_by_nid(erofs_nid_t nid)
118 {
119 	struct list_head *head =
120 		&inode_hashtable[nid % NR_INODE_HASHTABLE];
121 	struct erofs_inode *inode;
122 
123 	list_for_each_entry(inode, head, i_hash)
124 		if (inode->nid == nid)
125 			return erofs_igrab(inode);
126 	return NULL;
127 }
128 
129 unsigned int erofs_iput(struct erofs_inode *inode)
130 {
131 	struct erofs_dentry *d, *t;
132 	unsigned long got = erofs_atomic_dec_return(&inode->i_count);
133 
134 	if (got >= 1)
135 		return got;
136 
137 	list_for_each_entry_safe(d, t, &inode->i_subdirs, d_child)
138 		free(d);
139 
140 	free(inode->compressmeta);
141 	if (inode->eof_tailraw)
142 		free(inode->eof_tailraw);
143 	list_del(&inode->i_hash);
144 	if (inode->i_srcpath)
145 		free(inode->i_srcpath);
146 
147 	if (inode->datasource == EROFS_INODE_DATA_SOURCE_DISKBUF) {
148 		erofs_diskbuf_close(inode->i_diskbuf);
149 		free(inode->i_diskbuf);
150 	} else if (inode->i_link) {
151 		free(inode->i_link);
152 	}
153 	free(inode);
154 	return 0;
155 }
156 
157 struct erofs_dentry *erofs_d_alloc(struct erofs_inode *parent,
158 				   const char *name)
159 {
160 	struct erofs_dentry *d = malloc(sizeof(*d));
161 
162 	if (!d)
163 		return ERR_PTR(-ENOMEM);
164 
165 	strncpy(d->name, name, EROFS_NAME_LEN - 1);
166 	d->name[EROFS_NAME_LEN - 1] = '\0';
167 	d->inode = NULL;
168 	d->type = EROFS_FT_UNKNOWN;
169 	d->validnid = false;
170 	list_add_tail(&d->d_child, &parent->i_subdirs);
171 	return d;
172 }
173 
174 /* allocate main data for an inode */
175 static int __allocate_inode_bh_data(struct erofs_inode *inode,
176 				    unsigned long nblocks,
177 				    int type)
178 {
179 	struct erofs_bufmgr *bmgr = inode->sbi->bmgr;
180 	struct erofs_buffer_head *bh;
181 	int ret;
182 
183 	if (!nblocks) {
184 		/* it has only tail-end data */
185 		inode->u.i_blkaddr = NULL_ADDR;
186 		return 0;
187 	}
188 
189 	/* allocate main data buffer */
190 	bh = erofs_balloc(bmgr, type, erofs_pos(inode->sbi, nblocks), 0, 0);
191 	if (IS_ERR(bh))
192 		return PTR_ERR(bh);
193 
194 	bh->op = &erofs_skip_write_bhops;
195 	inode->bh_data = bh;
196 
197 	/* get blkaddr of the bh */
198 	ret = erofs_mapbh(NULL, bh->block);
199 	DBG_BUGON(ret < 0);
200 
201 	/* write blocks except for the tail-end block */
202 	inode->u.i_blkaddr = bh->block->blkaddr;
203 	return 0;
204 }
205 
206 static int comp_subdir(const void *a, const void *b)
207 {
208 	const struct erofs_dentry *da, *db;
209 
210 	da = *((const struct erofs_dentry **)a);
211 	db = *((const struct erofs_dentry **)b);
212 	return strcmp(da->name, db->name);
213 }
214 
215 int erofs_init_empty_dir(struct erofs_inode *dir)
216 {
217 	struct erofs_dentry *d;
218 
219 	/* "." points to the current dir inode */
220 	d = erofs_d_alloc(dir, ".");
221 	if (IS_ERR(d))
222 		return PTR_ERR(d);
223 	d->inode = erofs_igrab(dir);
224 	d->type = EROFS_FT_DIR;
225 
226 	/* ".." points to the parent dir */
227 	d = erofs_d_alloc(dir, "..");
228 	if (IS_ERR(d))
229 		return PTR_ERR(d);
230 	d->inode = erofs_igrab(erofs_parent_inode(dir));
231 	d->type = EROFS_FT_DIR;
232 
233 	dir->i_nlink = 2;
234 	return 0;
235 }
236 
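/*
 * The directory size calculation below follows the on-disk rule that a
 * dirent and its name never cross a block boundary: whenever the next
 * entry would overflow the current block, d_size is first rounded up to
 * the next block before the entry is accounted.
 */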
237 static int erofs_prepare_dir_file(struct erofs_inode *dir,
238 				  unsigned int nr_subdirs)
239 {
240 	struct erofs_sb_info *sbi = dir->sbi;
241 	struct erofs_dentry *d, *n, **sorted_d;
242 	unsigned int i;
243 	unsigned int d_size = 0;
244 
245 	sorted_d = malloc(nr_subdirs * sizeof(d));
246 	if (!sorted_d)
247 		return -ENOMEM;
248 	i = 0;
249 	list_for_each_entry_safe(d, n, &dir->i_subdirs, d_child) {
250 		list_del(&d->d_child);
251 		sorted_d[i++] = d;
252 	}
253 	DBG_BUGON(i != nr_subdirs);
254 	qsort(sorted_d, nr_subdirs, sizeof(d), comp_subdir);
255 	for (i = 0; i < nr_subdirs; i++)
256 		list_add_tail(&sorted_d[i]->d_child, &dir->i_subdirs);
257 	free(sorted_d);
258 
259 	/* let's calculate dir size */
260 	list_for_each_entry(d, &dir->i_subdirs, d_child) {
261 		int len = strlen(d->name) + sizeof(struct erofs_dirent);
262 
263 		if (erofs_blkoff(sbi, d_size) + len > erofs_blksiz(sbi))
264 			d_size = round_up(d_size, erofs_blksiz(sbi));
265 		d_size += len;
266 	}
267 	dir->i_size = d_size;
268 
269 	/* directories are never compressed */
270 	dir->datalayout = EROFS_INODE_FLAT_INLINE;
271 
272 	/* it will be used in erofs_prepare_inode_buffer */
273 	dir->idata_size = d_size % erofs_blksiz(sbi);
274 	return 0;
275 }
276 
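/*
 * Within one directory block, fixed-size struct erofs_dirent records are
 * packed from the head while the corresponding names are packed from the
 * shared name offset 'q'; any space left at the end of the block is
 * zeroed out.
 */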
277 static void fill_dirblock(char *buf, unsigned int size, unsigned int q,
278 			  struct erofs_dentry *head, struct erofs_dentry *end)
279 {
280 	unsigned int p = 0;
281 
282 	/* write out all erofs_dirents + filenames */
283 	while (head != end) {
284 		const unsigned int namelen = strlen(head->name);
285 		struct erofs_dirent d = {
286 			.nid = cpu_to_le64(head->nid),
287 			.nameoff = cpu_to_le16(q),
288 			.file_type = head->type,
289 		};
290 
291 		memcpy(buf + p, &d, sizeof(d));
292 		memcpy(buf + q, head->name, namelen);
293 		p += sizeof(d);
294 		q += namelen;
295 
296 		head = list_next_entry(head, d_child);
297 	}
298 	memset(buf + q, 0, size - q);
299 }
300 
301 static int write_dirblock(struct erofs_sb_info *sbi,
302 			  unsigned int q, struct erofs_dentry *head,
303 			  struct erofs_dentry *end, erofs_blk_t blkaddr)
304 {
305 	char buf[EROFS_MAX_BLOCK_SIZE];
306 
307 	fill_dirblock(buf, erofs_blksiz(sbi), q, head, end);
308 	return erofs_blk_write(sbi, buf, blkaddr, 1);
309 }
310 
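/*
 * An NID is simply the inode's byte offset from the metadata area
 * divided by the inode slot size (1 << EROFS_ISLOTBITS), so it can only
 * be assigned once the buffer head has been mapped to its final block
 * address.
 */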
311 erofs_nid_t erofs_lookupnid(struct erofs_inode *inode)
312 {
313 	struct erofs_buffer_head *const bh = inode->bh;
314 	struct erofs_sb_info *sbi = inode->sbi;
315 	erofs_off_t off, meta_offset;
316 
317 	if (bh && (long long)inode->nid <= 0) {
318 		erofs_mapbh(NULL, bh->block);
319 		off = erofs_btell(bh, false);
320 
321 		meta_offset = erofs_pos(sbi, sbi->meta_blkaddr);
322 		DBG_BUGON(off < meta_offset);
323 		inode->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
324 		erofs_dbg("Assign nid %llu to file %s (mode %05o)",
325 			  inode->nid, inode->i_srcpath, inode->i_mode);
326 	}
327 	if (__erofs_unlikely(IS_ROOT(inode)) && inode->nid > 0xffff)
328 		return sbi->root_nid;
329 	return inode->nid;
330 }
331 
332 static void erofs_d_invalidate(struct erofs_dentry *d)
333 {
334 	struct erofs_inode *const inode = d->inode;
335 
336 	if (d->validnid)
337 		return;
338 	d->nid = erofs_lookupnid(inode);
339 	d->validnid = true;
340 	erofs_iput(inode);
341 }
342 
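/*
 * For rebuilt directories that keep their on-disk data (the validnid
 * case), the ".." entry still points at the NID from the old image:
 * scan the raw dirent blocks, locate "..", and rewrite its NID to the
 * new parent.
 */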
343 static int erofs_rebuild_inode_fix_pnid(struct erofs_inode *parent,
344 					erofs_nid_t nid)
345 {
346 	struct erofs_inode dir = {
347 		.sbi = parent->sbi,
348 		.nid = nid
349 	};
350 	unsigned int bsz = erofs_blksiz(dir.sbi);
351 	unsigned int err, isz;
352 	erofs_off_t boff, off;
353 	erofs_nid_t pnid;
354 	bool fixed = false;
355 
356 	err = erofs_read_inode_from_disk(&dir);
357 	if (err)
358 		return err;
359 
360 	if (!S_ISDIR(dir.i_mode))
361 		return -ENOTDIR;
362 
363 	if (dir.datalayout != EROFS_INODE_FLAT_INLINE &&
364 	    dir.datalayout != EROFS_INODE_FLAT_PLAIN)
365 		return -EOPNOTSUPP;
366 
367 	pnid = erofs_lookupnid(parent);
368 	isz = dir.inode_isize + dir.xattr_isize;
369 	boff = erofs_pos(dir.sbi, dir.u.i_blkaddr);
370 	for (off = 0; off < dir.i_size; off += bsz) {
371 		char buf[EROFS_MAX_BLOCK_SIZE];
372 		struct erofs_dirent *de = (struct erofs_dirent *)buf;
373 		unsigned int nameoff, count, de_nameoff;
374 
375 		count = min_t(erofs_off_t, bsz, dir.i_size - off);
376 		err = erofs_pread(&dir, buf, count, off);
377 		if (err)
378 			return err;
379 
380 		nameoff = le16_to_cpu(de->nameoff);
381 		if (nameoff < sizeof(struct erofs_dirent) ||
382 		    nameoff >= count) {
383 			erofs_err("invalid de[0].nameoff %u @ nid %llu, offset %llu",
384 				  nameoff, dir.nid | 0ULL, off | 0ULL);
385 			return -EFSCORRUPTED;
386 		}
387 
388 		while ((char *)de < buf + nameoff) {
389 			de_nameoff = le16_to_cpu(de->nameoff);
390 			if (((char *)(de + 1) >= buf + nameoff ?
391 				strnlen(buf + de_nameoff, count - de_nameoff) == 2 :
392 				le16_to_cpu(de[1].nameoff) == de_nameoff + 2) &&
393 			   !memcmp(buf + de_nameoff, "..", 2)) {
394 				if (de->nid == cpu_to_le64(pnid))
395 					return 0;
396 				de->nid = cpu_to_le64(pnid);
397 				fixed = true;
398 				break;
399 			}
400 			++de;
401 		}
402 
403 		if (!fixed)
404 			continue;
405 		err = erofs_dev_write(dir.sbi, buf,
406 			(off + bsz > dir.i_size &&
407 				dir.datalayout == EROFS_INODE_FLAT_INLINE ?
408 				erofs_iloc(&dir) + isz : boff + off), count);
409 		erofs_dbg("directory %llu pNID is updated to %llu",
410 			  nid | 0ULL, pnid | 0ULL);
411 		break;
412 	}
413 	if (err || fixed)
414 		return err;
415 
416 	erofs_err("directory data %llu is corrupted (\"..\" not found)",
417 		  nid | 0ULL);
418 	return -EFSCORRUPTED;
419 }
420 
421 static int erofs_write_dir_file(struct erofs_inode *dir)
422 {
423 	struct erofs_dentry *head = list_first_entry(&dir->i_subdirs,
424 						     struct erofs_dentry,
425 						     d_child);
426 	struct erofs_sb_info *sbi = dir->sbi;
427 	struct erofs_dentry *d;
428 	int ret;
429 	unsigned int q, used, blkno;
430 
431 	q = used = blkno = 0;
432 
433 	/* allocate dir main data */
434 	ret = __allocate_inode_bh_data(dir, erofs_blknr(sbi, dir->i_size), DIRA);
435 	if (ret)
436 		return ret;
437 
438 	list_for_each_entry(d, &dir->i_subdirs, d_child) {
439 		const unsigned int len = strlen(d->name) +
440 			sizeof(struct erofs_dirent);
441 
442 		/* XXX: a bit hacky, but to avoid another traversal */
443 		if (d->validnid && d->type == EROFS_FT_DIR) {
444 			ret = erofs_rebuild_inode_fix_pnid(dir, d->nid);
445 			if (ret)
446 				return ret;
447 		}
448 
449 		erofs_d_invalidate(d);
450 		if (used + len > erofs_blksiz(sbi)) {
451 			ret = write_dirblock(sbi, q, head, d,
452 					     dir->u.i_blkaddr + blkno);
453 			if (ret)
454 				return ret;
455 
456 			head = d;
457 			q = used = 0;
458 			++blkno;
459 		}
460 		used += len;
461 		q += sizeof(struct erofs_dirent);
462 	}
463 
464 	DBG_BUGON(used > erofs_blksiz(sbi));
465 	if (used == erofs_blksiz(sbi)) {
466 		DBG_BUGON(dir->i_size % erofs_blksiz(sbi));
467 		DBG_BUGON(dir->idata_size);
468 		return write_dirblock(sbi, q, head, d, dir->u.i_blkaddr + blkno);
469 	}
470 	DBG_BUGON(used != dir->i_size % erofs_blksiz(sbi));
471 	if (used) {
472 		/* fill tail-end dir block */
473 		dir->idata = malloc(used);
474 		if (!dir->idata)
475 			return -ENOMEM;
476 		DBG_BUGON(used != dir->idata_size);
477 		fill_dirblock(dir->idata, dir->idata_size, q, head, d);
478 	}
479 	return 0;
480 }
481 
482 int erofs_write_file_from_buffer(struct erofs_inode *inode, char *buf)
483 {
484 	struct erofs_sb_info *sbi = inode->sbi;
485 	const unsigned int nblocks = erofs_blknr(sbi, inode->i_size);
486 	int ret;
487 
488 	inode->datalayout = EROFS_INODE_FLAT_INLINE;
489 
490 	ret = __allocate_inode_bh_data(inode, nblocks, DATA);
491 	if (ret)
492 		return ret;
493 
494 	if (nblocks)
495 		erofs_blk_write(sbi, buf, inode->u.i_blkaddr, nblocks);
496 	inode->idata_size = inode->i_size % erofs_blksiz(sbi);
497 	if (inode->idata_size) {
498 		inode->idata = malloc(inode->idata_size);
499 		if (!inode->idata)
500 			return -ENOMEM;
501 		memcpy(inode->idata, buf + erofs_pos(sbi, nblocks),
502 		       inode->idata_size);
503 	}
504 	return 0;
505 }
506 
507 /* rules to decide whether a file could be compressed or not */
508 static bool erofs_file_is_compressible(struct erofs_inode *inode)
509 {
510 	if (cfg.c_compress_hints_file)
511 		return z_erofs_apply_compress_hints(inode);
512 	return true;
513 }
514 
515 static int write_uncompressed_file_from_fd(struct erofs_inode *inode, int fd)
516 {
517 	int ret;
518 	erofs_blk_t nblocks, i;
519 	unsigned int len;
520 	struct erofs_sb_info *sbi = inode->sbi;
521 
522 	inode->datalayout = EROFS_INODE_FLAT_INLINE;
523 	nblocks = inode->i_size >> sbi->blkszbits;
524 
525 	ret = __allocate_inode_bh_data(inode, nblocks, DATA);
526 	if (ret)
527 		return ret;
528 
529 	for (i = 0; i < nblocks; i += (len >> sbi->blkszbits)) {
530 		len = min_t(u64, round_down(UINT_MAX, 1U << sbi->blkszbits),
531 			    erofs_pos(sbi, nblocks - i));
532 		ret = erofs_io_xcopy(&sbi->bdev,
533 				     erofs_pos(sbi, inode->u.i_blkaddr + i),
534 				     &((struct erofs_vfile){ .fd = fd }), len,
535 			inode->datasource == EROFS_INODE_DATA_SOURCE_DISKBUF);
536 		if (ret)
537 			return ret;
538 	}
539 
540 	/* read the tail-end data */
541 	inode->idata_size = inode->i_size % erofs_blksiz(sbi);
542 	if (inode->idata_size) {
543 		inode->idata = malloc(inode->idata_size);
544 		if (!inode->idata)
545 			return -ENOMEM;
546 
547 		ret = read(fd, inode->idata, inode->idata_size);
548 		if (ret < inode->idata_size) {
549 			free(inode->idata);
550 			inode->idata = NULL;
551 			return -EIO;
552 		}
553 	}
554 	erofs_droid_blocklist_write(inode, inode->u.i_blkaddr, nblocks);
555 	return 0;
556 }
557 
558 int erofs_write_unencoded_file(struct erofs_inode *inode, int fd, u64 fpos)
559 {
560 	if (cfg.c_chunkbits) {
561 		inode->u.chunkbits = cfg.c_chunkbits;
562 		/* chunk indexes when explicitly specified */
563 		inode->u.chunkformat = 0;
564 		if (cfg.c_force_chunkformat == FORCE_INODE_CHUNK_INDEXES)
565 			inode->u.chunkformat = EROFS_CHUNK_FORMAT_INDEXES;
566 		return erofs_blob_write_chunked_file(inode, fd, fpos);
567 	}
568 
569 	/* fallback to all data uncompressed */
570 	return write_uncompressed_file_from_fd(inode, fd);
571 }
572 
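/*
 * On-disk i_format packs the inode version in bit 0 (0 == compact,
 * 1 == extended) and the data layout in the bits above it, hence the
 * "0 | (datalayout << 1)" and "1 | (datalayout << 1)" below.
 */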
573 int erofs_iflush(struct erofs_inode *inode)
574 {
575 	const u16 icount = EROFS_INODE_XATTR_ICOUNT(inode->xattr_isize);
576 	struct erofs_sb_info *sbi = inode->sbi;
577 	erofs_off_t off;
578 	union {
579 		struct erofs_inode_compact dic;
580 		struct erofs_inode_extended die;
581 	} u = {};
582 	int ret;
583 
584 	if (inode->bh)
585 		off = erofs_btell(inode->bh, false);
586 	else
587 		off = erofs_iloc(inode);
588 
589 	switch (inode->inode_isize) {
590 	case sizeof(struct erofs_inode_compact):
591 		u.dic.i_format = cpu_to_le16(0 | (inode->datalayout << 1));
592 		u.dic.i_xattr_icount = cpu_to_le16(icount);
593 		u.dic.i_mode = cpu_to_le16(inode->i_mode);
594 		u.dic.i_nlink = cpu_to_le16(inode->i_nlink);
595 		u.dic.i_size = cpu_to_le32((u32)inode->i_size);
596 
597 		u.dic.i_ino = cpu_to_le32(inode->i_ino[0]);
598 
599 		u.dic.i_uid = cpu_to_le16((u16)inode->i_uid);
600 		u.dic.i_gid = cpu_to_le16((u16)inode->i_gid);
601 
602 		switch (inode->i_mode & S_IFMT) {
603 		case S_IFCHR:
604 		case S_IFBLK:
605 		case S_IFIFO:
606 		case S_IFSOCK:
607 			u.dic.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
608 			break;
609 
610 		default:
611 			if (is_inode_layout_compression(inode))
612 				u.dic.i_u.compressed_blocks =
613 					cpu_to_le32(inode->u.i_blocks);
614 			else if (inode->datalayout ==
615 					EROFS_INODE_CHUNK_BASED)
616 				u.dic.i_u.c.format =
617 					cpu_to_le16(inode->u.chunkformat);
618 			else
619 				u.dic.i_u.raw_blkaddr =
620 					cpu_to_le32(inode->u.i_blkaddr);
621 			break;
622 		}
623 		break;
624 	case sizeof(struct erofs_inode_extended):
625 		u.die.i_format = cpu_to_le16(1 | (inode->datalayout << 1));
626 		u.die.i_xattr_icount = cpu_to_le16(icount);
627 		u.die.i_mode = cpu_to_le16(inode->i_mode);
628 		u.die.i_nlink = cpu_to_le32(inode->i_nlink);
629 		u.die.i_size = cpu_to_le64(inode->i_size);
630 
631 		u.die.i_ino = cpu_to_le32(inode->i_ino[0]);
632 
633 		u.die.i_uid = cpu_to_le32(inode->i_uid);
634 		u.die.i_gid = cpu_to_le32(inode->i_gid);
635 
636 		u.die.i_mtime = cpu_to_le64(inode->i_mtime);
637 		u.die.i_mtime_nsec = cpu_to_le32(inode->i_mtime_nsec);
638 
639 		switch (inode->i_mode & S_IFMT) {
640 		case S_IFCHR:
641 		case S_IFBLK:
642 		case S_IFIFO:
643 		case S_IFSOCK:
644 			u.die.i_u.rdev = cpu_to_le32(inode->u.i_rdev);
645 			break;
646 
647 		default:
648 			if (is_inode_layout_compression(inode))
649 				u.die.i_u.compressed_blocks =
650 					cpu_to_le32(inode->u.i_blocks);
651 			else if (inode->datalayout ==
652 					EROFS_INODE_CHUNK_BASED)
653 				u.die.i_u.c.format =
654 					cpu_to_le16(inode->u.chunkformat);
655 			else
656 				u.die.i_u.raw_blkaddr =
657 					cpu_to_le32(inode->u.i_blkaddr);
658 			break;
659 		}
660 		break;
661 	default:
662 		erofs_err("unsupported on-disk inode version of nid %llu",
663 			  (unsigned long long)inode->nid);
664 		BUG_ON(1);
665 	}
666 
667 	ret = erofs_dev_write(sbi, &u, off, inode->inode_isize);
668 	if (ret)
669 		return ret;
670 	off += inode->inode_isize;
671 
672 	if (inode->xattr_isize) {
673 		char *xattrs = erofs_export_xattr_ibody(inode);
674 
675 		if (IS_ERR(xattrs))
676 			return PTR_ERR(xattrs);
677 
678 		ret = erofs_dev_write(sbi, xattrs, off, inode->xattr_isize);
679 		free(xattrs);
680 		if (ret)
681 			return ret;
682 
683 		off += inode->xattr_isize;
684 	}
685 
686 	if (inode->extent_isize) {
687 		if (inode->datalayout == EROFS_INODE_CHUNK_BASED) {
688 			ret = erofs_blob_write_chunk_indexes(inode, off);
689 			if (ret)
690 				return ret;
691 		} else {
692 			/* write compression metadata */
693 			off = roundup(off, 8);
694 			ret = erofs_dev_write(sbi, inode->compressmeta, off,
695 					      inode->extent_isize);
696 			if (ret)
697 				return ret;
698 		}
699 	}
700 	return 0;
701 }
702 
703 static int erofs_bh_flush_write_inode(struct erofs_buffer_head *bh)
704 {
705 	struct erofs_inode *inode = bh->fsprivate;
706 	int ret;
707 
708 	DBG_BUGON(inode->bh != bh);
709 	ret = erofs_iflush(inode);
710 	if (ret)
711 		return ret;
712 	inode->bh = NULL;
713 	erofs_iput(inode);
714 	return erofs_bh_flush_generic_end(bh);
715 }
716 
717 static struct erofs_bhops erofs_write_inode_bhops = {
718 	.flush = erofs_bh_flush_write_inode,
719 };
720 
721 static int erofs_prepare_tail_block(struct erofs_inode *inode)
722 {
723 	struct erofs_sb_info *sbi = inode->sbi;
724 	struct erofs_buffer_head *bh;
725 	int ret;
726 
727 	if (!inode->idata_size)
728 		return 0;
729 
730 	bh = inode->bh_data;
731 	if (bh) {
732 		/* expand a block as the tail block (should be successful) */
733 		ret = erofs_bh_balloon(bh, erofs_blksiz(sbi));
734 		if (ret != erofs_blksiz(sbi)) {
735 			DBG_BUGON(1);
736 			return -EIO;
737 		}
738 	} else {
739 		inode->lazy_tailblock = true;
740 	}
741 	if (is_inode_layout_compression(inode))
742 		inode->u.i_blocks += 1;
743 	return 0;
744 }
745 
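/*
 * Try to place the inode and its inline (tail-end) data together in the
 * metadata area; if the allocator cannot fit both (-ENOSPC), fall back
 * to FLAT_PLAIN (or drop the inline pcluster for compressed inodes) and
 * spill the tail into a separate data block instead.
 */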
746 static int erofs_prepare_inode_buffer(struct erofs_inode *inode)
747 {
748 	struct erofs_bufmgr *bmgr = inode->sbi->bmgr;
749 	unsigned int inodesize;
750 	struct erofs_buffer_head *bh, *ibh;
751 
752 	DBG_BUGON(inode->bh || inode->bh_inline);
753 
754 	inodesize = inode->inode_isize + inode->xattr_isize;
755 	if (inode->extent_isize)
756 		inodesize = roundup(inodesize, 8) + inode->extent_isize;
757 
758 	if (inode->datalayout == EROFS_INODE_FLAT_PLAIN)
759 		goto noinline;
760 
761 	/* TODO: tailpacking inline of chunk-based format isn't finalized */
762 	if (inode->datalayout == EROFS_INODE_CHUNK_BASED)
763 		goto noinline;
764 
765 	if (!is_inode_layout_compression(inode)) {
766 		if (!cfg.c_inline_data && S_ISREG(inode->i_mode)) {
767 			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
768 			goto noinline;
769 		}
770 		/*
771 		 * If an uncompressed file's size is block-aligned, the
772 		 * EROFS_INODE_FLAT_PLAIN data layout should be used.
773 		 */
774 		if (!inode->idata_size)
775 			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
776 	}
777 
778 	bh = erofs_balloc(bmgr, INODE, inodesize, 0, inode->idata_size);
779 	if (bh == ERR_PTR(-ENOSPC)) {
780 		int ret;
781 
782 		if (is_inode_layout_compression(inode))
783 			z_erofs_drop_inline_pcluster(inode);
784 		else
785 			inode->datalayout = EROFS_INODE_FLAT_PLAIN;
786 noinline:
787 		/* expand an extra block for tail-end data */
788 		ret = erofs_prepare_tail_block(inode);
789 		if (ret)
790 			return ret;
791 		bh = erofs_balloc(bmgr, INODE, inodesize, 0, 0);
792 		if (IS_ERR(bh))
793 			return PTR_ERR(bh);
794 		DBG_BUGON(inode->bh_inline);
795 	} else if (IS_ERR(bh)) {
796 		return PTR_ERR(bh);
797 	} else if (inode->idata_size) {
798 		if (is_inode_layout_compression(inode)) {
799 			DBG_BUGON(!cfg.c_ztailpacking);
800 			erofs_dbg("Inline %scompressed data (%u bytes) to %s",
801 				  inode->compressed_idata ? "" : "un",
802 				  inode->idata_size, inode->i_srcpath);
803 			erofs_sb_set_ztailpacking(inode->sbi);
804 		} else {
805 			inode->datalayout = EROFS_INODE_FLAT_INLINE;
806 			erofs_dbg("Inline tail-end data (%u bytes) to %s",
807 				  inode->idata_size, inode->i_srcpath);
808 		}
809 
810 		/* allocate inline buffer */
811 		ibh = erofs_battach(bh, META, inode->idata_size);
812 		if (IS_ERR(ibh))
813 			return PTR_ERR(ibh);
814 
815 		ibh->op = &erofs_skip_write_bhops;
816 		inode->bh_inline = ibh;
817 	}
818 
819 	bh->fsprivate = erofs_igrab(inode);
820 	bh->op = &erofs_write_inode_bhops;
821 	inode->bh = bh;
822 	return 0;
823 }
824 
825 static int erofs_bh_flush_write_inline(struct erofs_buffer_head *bh)
826 {
827 	struct erofs_inode *const inode = bh->fsprivate;
828 	const erofs_off_t off = erofs_btell(bh, false);
829 	int ret;
830 
831 	ret = erofs_dev_write(inode->sbi, inode->idata, off, inode->idata_size);
832 	if (ret)
833 		return ret;
834 
835 	free(inode->idata);
836 	inode->idata = NULL;
837 
838 	erofs_iput(inode);
839 	return erofs_bh_flush_generic_end(bh);
840 }
841 
842 static struct erofs_bhops erofs_write_inline_bhops = {
843 	.flush = erofs_bh_flush_write_inline,
844 };
845 
846 static int erofs_write_tail_end(struct erofs_inode *inode)
847 {
848 	struct erofs_sb_info *sbi = inode->sbi;
849 	struct erofs_buffer_head *bh, *ibh;
850 
851 	bh = inode->bh_data;
852 
853 	if (!inode->idata_size)
854 		goto out;
855 
856 	DBG_BUGON(!inode->idata);
857 	/* have enough room to inline data */
858 	if (inode->bh_inline) {
859 		ibh = inode->bh_inline;
860 
861 		ibh->fsprivate = erofs_igrab(inode);
862 		ibh->op = &erofs_write_inline_bhops;
863 
864 		erofs_droid_blocklist_write_tail_end(inode, NULL_ADDR);
865 	} else {
866 		int ret;
867 		erofs_off_t pos, zero_pos;
868 
869 		if (!bh) {
870 			bh = erofs_balloc(sbi->bmgr, DATA,
871 					  erofs_blksiz(sbi), 0, 0);
872 			if (IS_ERR(bh))
873 				return PTR_ERR(bh);
874 			bh->op = &erofs_skip_write_bhops;
875 
876 			/* get blkaddr of bh */
877 			ret = erofs_mapbh(NULL, bh->block);
878 			inode->u.i_blkaddr = bh->block->blkaddr;
879 			inode->bh_data = bh;
880 		} else {
881 			if (inode->lazy_tailblock) {
882 				/* expand a tail block (should be successful) */
883 				ret = erofs_bh_balloon(bh, erofs_blksiz(sbi));
884 				if (ret != erofs_blksiz(sbi)) {
885 					DBG_BUGON(1);
886 					return -EIO;
887 				}
888 				inode->lazy_tailblock = false;
889 			}
890 			ret = erofs_mapbh(NULL, bh->block);
891 		}
892 		DBG_BUGON(ret < 0);
893 		pos = erofs_btell(bh, true) - erofs_blksiz(sbi);
894 
895 		/* zeroed data should be padded at the head for 0padding conversion */
896 		if (erofs_sb_has_lz4_0padding(sbi) && inode->compressed_idata) {
897 			zero_pos = pos;
898 			pos += erofs_blksiz(sbi) - inode->idata_size;
899 		} else {
900 			/* pad 0'ed data for the other cases */
901 			zero_pos = pos + inode->idata_size;
902 		}
903 		ret = erofs_dev_write(sbi, inode->idata, pos, inode->idata_size);
904 		if (ret)
905 			return ret;
906 
907 		DBG_BUGON(inode->idata_size > erofs_blksiz(sbi));
908 		if (inode->idata_size < erofs_blksiz(sbi)) {
909 			ret = erofs_dev_fillzero(sbi, zero_pos,
910 					   erofs_blksiz(sbi) - inode->idata_size,
911 					   false);
912 			if (ret)
913 				return ret;
914 		}
915 		inode->idata_size = 0;
916 		free(inode->idata);
917 		inode->idata = NULL;
918 
919 		erofs_droid_blocklist_write_tail_end(inode, erofs_blknr(sbi, pos));
920 	}
921 out:
922 	/* now bh_data can drop directly */
923 	if (bh) {
924 		/*
925 		 * Don't leave already-written DATA buffers in the global
926 		 * buffer list; they would only slow down balloc().
927 		 */
928 		erofs_bdrop(bh, false);
929 		inode->bh_data = NULL;
930 	}
931 	return 0;
932 }
933 
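/*
 * The compact on-disk inode only carries 16-bit uid/gid/nlink, a 32-bit
 * size and no private mtime (it reuses the image build time), so any
 * value beyond those limits forces the extended format.
 */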
934 static bool erofs_should_use_inode_extended(struct erofs_inode *inode)
935 {
936 	if (cfg.c_force_inodeversion == FORCE_INODE_EXTENDED)
937 		return true;
938 	if (inode->i_size > UINT_MAX)
939 		return true;
940 	if (erofs_is_packed_inode(inode))
941 		return false;
942 	if (inode->i_uid > USHRT_MAX)
943 		return true;
944 	if (inode->i_gid > USHRT_MAX)
945 		return true;
946 	if (inode->i_nlink > USHRT_MAX)
947 		return true;
948 	if ((inode->i_mtime != inode->sbi->build_time ||
949 	     inode->i_mtime_nsec != inode->sbi->build_time_nsec) &&
950 	    !cfg.c_ignore_mtime)
951 		return true;
952 	return false;
953 }
954 
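/*
 * Same layout as the kernel's new_encode_dev(): the low 8 minor bits
 * stay in bits 0..7, the 12-bit major goes to bits 8..19, and the
 * remaining high minor bits are stored from bit 20 upwards.
 */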
955 u32 erofs_new_encode_dev(dev_t dev)
956 {
957 	const unsigned int major = major(dev);
958 	const unsigned int minor = minor(dev);
959 
960 	return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
961 }
962 
963 #ifdef WITH_ANDROID
964 int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
965 			       struct stat *st,
966 			       const char *path)
967 {
968 	/* filesystem_config does not preserve file type bits */
969 	mode_t stat_file_type_mask = st->st_mode & S_IFMT;
970 	unsigned int uid = 0, gid = 0, mode = 0;
971 	const char *fspath;
972 	char *decorated = NULL;
973 
974 	inode->capabilities = 0;
975 	if (!cfg.fs_config_file && !cfg.mount_point)
976 		return 0;
977 	/* avoid loading special inodes */
978 	if (path == EROFS_PACKED_INODE)
979 		return 0;
980 
981 	if (!cfg.mount_point ||
982 	/* have to drop the mountpoint for rootdir of canned fsconfig */
983 	    (cfg.fs_config_file && erofs_fspath(path)[0] == '\0')) {
984 		fspath = erofs_fspath(path);
985 	} else {
986 		if (asprintf(&decorated, "%s/%s", cfg.mount_point,
987 			     erofs_fspath(path)) <= 0)
988 			return -ENOMEM;
989 		fspath = decorated;
990 	}
991 
992 	if (cfg.fs_config_file)
993 		canned_fs_config(fspath, S_ISDIR(st->st_mode),
994 				 cfg.target_out_path,
995 				 &uid, &gid, &mode, &inode->capabilities);
996 	else
997 		fs_config(fspath, S_ISDIR(st->st_mode),
998 			  cfg.target_out_path,
999 			  &uid, &gid, &mode, &inode->capabilities);
1000 
1001 	erofs_dbg("/%s -> mode = 0x%x, uid = 0x%x, gid = 0x%x, capabilities = 0x%" PRIx64,
1002 		  fspath, mode, uid, gid, inode->capabilities);
1003 
1004 	if (decorated)
1005 		free(decorated);
1006 	st->st_uid = uid;
1007 	st->st_gid = gid;
1008 	st->st_mode = mode | stat_file_type_mask;
1009 	return 0;
1010 }
1011 #else
1012 static int erofs_droid_inode_fsconfig(struct erofs_inode *inode,
1013 				      struct stat *st,
1014 				      const char *path)
1015 {
1016 	return 0;
1017 }
1018 #endif
1019 
1020 int __erofs_fill_inode(struct erofs_inode *inode, struct stat *st,
1021 		       const char *path)
1022 {
1023 	int err = erofs_droid_inode_fsconfig(inode, st, path);
1024 	struct erofs_sb_info *sbi = inode->sbi;
1025 
1026 	if (err)
1027 		return err;
1028 
1029 	inode->i_uid = cfg.c_uid == -1 ? st->st_uid : cfg.c_uid;
1030 	inode->i_gid = cfg.c_gid == -1 ? st->st_gid : cfg.c_gid;
1031 
1032 	if (inode->i_uid + cfg.c_uid_offset < 0)
1033 		erofs_err("uid overflow @ %s", path);
1034 	inode->i_uid += cfg.c_uid_offset;
1035 
1036 	if (inode->i_gid + cfg.c_gid_offset < 0)
1037 		erofs_err("gid overflow @ %s", path);
1038 	inode->i_gid += cfg.c_gid_offset;
1039 
1040 	inode->i_mtime = st->st_mtime;
1041 	inode->i_mtime_nsec = ST_MTIM_NSEC(st);
1042 
1043 	switch (cfg.c_timeinherit) {
1044 	case TIMESTAMP_CLAMPING:
1045 		if (inode->i_mtime < sbi->build_time)
1046 			break;
1047 	case TIMESTAMP_FIXED:
1048 		inode->i_mtime = sbi->build_time;
1049 		inode->i_mtime_nsec = sbi->build_time_nsec;
1050 	default:
1051 		break;
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 static int erofs_fill_inode(struct erofs_inode *inode, struct stat *st,
1058 			    const char *path)
1059 {
1060 	int err = __erofs_fill_inode(inode, st, path);
1061 
1062 	if (err)
1063 		return err;
1064 
1065 	inode->i_mode = st->st_mode;
1066 	inode->i_nlink = 1;	/* fix up later if needed */
1067 
1068 	switch (inode->i_mode & S_IFMT) {
1069 	case S_IFCHR:
1070 	case S_IFBLK:
1071 	case S_IFIFO:
1072 	case S_IFSOCK:
1073 		inode->u.i_rdev = erofs_new_encode_dev(st->st_rdev);
1074 	case S_IFDIR:
1075 		inode->i_size = 0;
1076 		break;
1077 	case S_IFREG:
1078 	case S_IFLNK:
1079 		inode->i_size = st->st_size;
1080 		break;
1081 	default:
1082 		return -EINVAL;
1083 	}
1084 
1085 	inode->i_srcpath = strdup(path);
1086 	if (!inode->i_srcpath)
1087 		return -ENOMEM;
1088 
1089 	if (erofs_should_use_inode_extended(inode)) {
1090 		if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) {
1091 			erofs_err("file %s cannot be in compact form",
1092 				  inode->i_srcpath);
1093 			return -EINVAL;
1094 		}
1095 		inode->inode_isize = sizeof(struct erofs_inode_extended);
1096 	} else {
1097 		inode->inode_isize = sizeof(struct erofs_inode_compact);
1098 	}
1099 
1100 	inode->dev = st->st_dev;
1101 	inode->i_ino[1] = st->st_ino;
1102 	erofs_insert_ihash(inode);
1103 	return 0;
1104 }
1105 
1106 struct erofs_inode *erofs_new_inode(struct erofs_sb_info *sbi)
1107 {
1108 	struct erofs_inode *inode;
1109 
1110 	inode = calloc(1, sizeof(struct erofs_inode));
1111 	if (!inode)
1112 		return ERR_PTR(-ENOMEM);
1113 
1114 	inode->sbi = sbi;
1115 	inode->i_ino[0] = sbi->inos++;	/* inode serial number */
1116 	inode->i_count = 1;
1117 	inode->datalayout = EROFS_INODE_FLAT_PLAIN;
1118 
1119 	init_list_head(&inode->i_hash);
1120 	init_list_head(&inode->i_subdirs);
1121 	init_list_head(&inode->i_xattrs);
1122 	return inode;
1123 }
1124 
1125 /* get the inode from the source path */
1126 static struct erofs_inode *erofs_iget_from_srcpath(struct erofs_sb_info *sbi,
1127 						   const char *path)
1128 {
1129 	struct stat st;
1130 	struct erofs_inode *inode;
1131 	int ret;
1132 
1133 	ret = lstat(path, &st);
1134 	if (ret)
1135 		return ERR_PTR(-errno);
1136 
1137 	/*
1138 	 * Look up the hash table first; if the inode already exists, this
1139 	 * is a hard link, so just return it.  Also don't look up
1140 	 * directories since hard-linked directories aren't allowed.
1141 	 */
1142 	if (!S_ISDIR(st.st_mode)) {
1143 		inode = erofs_iget(st.st_dev, st.st_ino);
1144 		if (inode)
1145 			return inode;
1146 	}
1147 
1148 	/* cannot find in the inode cache */
1149 	inode = erofs_new_inode(sbi);
1150 	if (IS_ERR(inode))
1151 		return inode;
1152 
1153 	ret = erofs_fill_inode(inode, &st, path);
1154 	if (ret) {
1155 		erofs_iput(inode);
1156 		return ERR_PTR(ret);
1157 	}
1158 	return inode;
1159 }
1160 
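/*
 * The superblock can only address the root directory with a 16-bit NID,
 * so meta_blkaddr is chosen such that the root inode stays within
 * 0xffff slots of the metadata start.
 */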
1161 static void erofs_fixup_meta_blkaddr(struct erofs_inode *rootdir)
1162 {
1163 	const erofs_off_t rootnid_maxoffset = 0xffff << EROFS_ISLOTBITS;
1164 	struct erofs_buffer_head *const bh = rootdir->bh;
1165 	struct erofs_sb_info *sbi = rootdir->sbi;
1166 	erofs_off_t off, meta_offset;
1167 
1168 	erofs_mapbh(NULL, bh->block);
1169 	off = erofs_btell(bh, false);
1170 
1171 	if (off > rootnid_maxoffset)
1172 		meta_offset = round_up(off - rootnid_maxoffset, erofs_blksiz(sbi));
1173 	else
1174 		meta_offset = 0;
1175 	sbi->meta_blkaddr = erofs_blknr(sbi, meta_offset);
1176 	rootdir->nid = (off - meta_offset) >> EROFS_ISLOTBITS;
1177 }
1178 
1179 static int erofs_inode_reserve_data_blocks(struct erofs_inode *inode)
1180 {
1181 	struct erofs_sb_info *sbi = inode->sbi;
1182 	erofs_off_t alignedsz = round_up(inode->i_size, erofs_blksiz(sbi));
1183 	erofs_blk_t nblocks = alignedsz >> sbi->blkszbits;
1184 	struct erofs_buffer_head *bh;
1185 
1186 	/* allocate data blocks */
1187 	bh = erofs_balloc(sbi->bmgr, DATA, alignedsz, 0, 0);
1188 	if (IS_ERR(bh))
1189 		return PTR_ERR(bh);
1190 
1191 	/* get blkaddr of the bh */
1192 	(void)erofs_mapbh(NULL, bh->block);
1193 
1194 	/* write blocks except for the tail-end block */
1195 	inode->u.i_blkaddr = bh->block->blkaddr;
1196 	erofs_bdrop(bh, false);
1197 
1198 	inode->datalayout = EROFS_INODE_FLAT_PLAIN;
1199 	tarerofs_blocklist_write(inode->u.i_blkaddr, nblocks, inode->i_ino[1]);
1200 	return 0;
1201 }
1202 
1203 struct erofs_mkfs_job_ndir_ctx {
1204 	struct erofs_inode *inode;
1205 	void *ictx;
1206 	int fd;
1207 	u64 fpos;
1208 };
1209 
1210 static int erofs_mkfs_job_write_file(struct erofs_mkfs_job_ndir_ctx *ctx)
1211 {
1212 	struct erofs_inode *inode = ctx->inode;
1213 	int ret;
1214 
1215 	if (inode->datasource == EROFS_INODE_DATA_SOURCE_DISKBUF &&
1216 	    lseek(ctx->fd, ctx->fpos, SEEK_SET) < 0) {
1217 		ret = -errno;
1218 		goto out;
1219 	}
1220 
1221 	if (ctx->ictx) {
1222 		ret = erofs_write_compressed_file(ctx->ictx);
1223 		if (ret != -ENOSPC)
1224 			goto out;
1225 		if (lseek(ctx->fd, ctx->fpos, SEEK_SET) < 0) {
1226 			ret = -errno;
1227 			goto out;
1228 		}
1229 	}
1230 	/* fallback to all data uncompressed */
1231 	ret = erofs_write_unencoded_file(inode, ctx->fd, ctx->fpos);
1232 out:
1233 	if (inode->datasource == EROFS_INODE_DATA_SOURCE_DISKBUF) {
1234 		erofs_diskbuf_close(inode->i_diskbuf);
1235 		free(inode->i_diskbuf);
1236 		inode->i_diskbuf = NULL;
1237 		inode->datasource = EROFS_INODE_DATA_SOURCE_NONE;
1238 	} else {
1239 		close(ctx->fd);
1240 	}
1241 	return ret;
1242 }
1243 
1244 static int erofs_mkfs_handle_nondirectory(struct erofs_mkfs_job_ndir_ctx *ctx)
1245 {
1246 	struct erofs_inode *inode = ctx->inode;
1247 	int ret = 0;
1248 
1249 	if (S_ISLNK(inode->i_mode)) {
1250 		char *symlink = inode->i_link;
1251 
1252 		if (!symlink) {
1253 			symlink = malloc(inode->i_size);
1254 			if (!symlink)
1255 				return -ENOMEM;
1256 			ret = readlink(inode->i_srcpath, symlink, inode->i_size);
1257 			if (ret < 0) {
1258 				free(symlink);
1259 				return -errno;
1260 			}
1261 		}
1262 		ret = erofs_write_file_from_buffer(inode, symlink);
1263 		free(symlink);
1264 		inode->i_link = NULL;
1265 	} else if (inode->i_size) {
1266 		if (inode->datasource == EROFS_INODE_DATA_SOURCE_RESVSP)
1267 			ret = erofs_inode_reserve_data_blocks(inode);
1268 		else if (ctx->fd >= 0)
1269 			ret = erofs_mkfs_job_write_file(ctx);
1270 	}
1271 	if (ret)
1272 		return ret;
1273 	erofs_prepare_inode_buffer(inode);
1274 	erofs_write_tail_end(inode);
1275 	return 0;
1276 }
1277 
1278 enum erofs_mkfs_jobtype {	/* ordered job types */
1279 	EROFS_MKFS_JOB_NDIR,
1280 	EROFS_MKFS_JOB_DIR,
1281 	EROFS_MKFS_JOB_DIR_BH,
1282 	EROFS_MKFS_JOB_MAX
1283 };
1284 
1285 struct erofs_mkfs_jobitem {
1286 	enum erofs_mkfs_jobtype type;
1287 	union {
1288 		struct erofs_inode *inode;
1289 		struct erofs_mkfs_job_ndir_ctx ndir;
1290 	} u;
1291 };
1292 
1293 static int erofs_mkfs_jobfn(struct erofs_mkfs_jobitem *item)
1294 {
1295 	struct erofs_inode *inode = item->u.inode;
1296 	int ret;
1297 
1298 	if (item->type == EROFS_MKFS_JOB_NDIR)
1299 		return erofs_mkfs_handle_nondirectory(&item->u.ndir);
1300 
1301 	if (item->type == EROFS_MKFS_JOB_DIR) {
1302 		ret = erofs_prepare_inode_buffer(inode);
1303 		if (ret)
1304 			return ret;
1305 		inode->bh->op = &erofs_skip_write_bhops;
1306 		return 0;
1307 	}
1308 
1309 	if (item->type == EROFS_MKFS_JOB_DIR_BH) {
1310 		ret = erofs_write_dir_file(inode);
1311 		if (ret)
1312 			return ret;
1313 		erofs_write_tail_end(inode);
1314 		inode->bh->op = &erofs_write_inode_bhops;
1315 		erofs_iput(inode);
1316 		return 0;
1317 	}
1318 	return -EINVAL;
1319 }
1320 
1321 #ifdef EROFS_MT_ENABLED
1322 
1323 struct erofs_mkfs_dfops {
1324 	pthread_t worker;
1325 	pthread_mutex_t lock;
1326 	pthread_cond_t full, empty, drain;
1327 	struct erofs_mkfs_jobitem *queue;
1328 	unsigned int entries, head, tail;
1329 };
1330 
1331 #define EROFS_MT_QUEUE_SIZE 128
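/*
 * The deferred-ops queue is a single-producer/single-consumer ring: the
 * tree walker enqueues job items via erofs_mkfs_go() while the dfops
 * worker thread dequeues them.  head/tail are masked with (entries - 1),
 * so EROFS_MT_QUEUE_SIZE must remain a power of two; the 'drain' condvar
 * lets the producer wait until the worker has emptied the queue.
 */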
1332 
1333 static void erofs_mkfs_flushjobs(struct erofs_sb_info *sbi)
1334 {
1335 	struct erofs_mkfs_dfops *q = sbi->mkfs_dfops;
1336 
1337 	pthread_mutex_lock(&q->lock);
1338 	pthread_cond_wait(&q->drain, &q->lock);
1339 	pthread_mutex_unlock(&q->lock);
1340 }
1341 
1342 static void *erofs_mkfs_pop_jobitem(struct erofs_mkfs_dfops *q)
1343 {
1344 	struct erofs_mkfs_jobitem *item;
1345 
1346 	pthread_mutex_lock(&q->lock);
1347 	while (q->head == q->tail) {
1348 		pthread_cond_signal(&q->drain);
1349 		pthread_cond_wait(&q->empty, &q->lock);
1350 	}
1351 
1352 	item = q->queue + q->head;
1353 	q->head = (q->head + 1) & (q->entries - 1);
1354 
1355 	pthread_cond_signal(&q->full);
1356 	pthread_mutex_unlock(&q->lock);
1357 	return item;
1358 }
1359 
1360 static void *z_erofs_mt_dfops_worker(void *arg)
1361 {
1362 	struct erofs_sb_info *sbi = arg;
1363 	int ret = 0;
1364 
1365 	while (1) {
1366 		struct erofs_mkfs_jobitem *item;
1367 
1368 		item = erofs_mkfs_pop_jobitem(sbi->mkfs_dfops);
1369 		if (item->type >= EROFS_MKFS_JOB_MAX)
1370 			break;
1371 		ret = erofs_mkfs_jobfn(item);
1372 		if (ret)
1373 			break;
1374 	}
1375 	pthread_exit((void *)(uintptr_t)ret);
1376 }
1377 
1378 static int erofs_mkfs_go(struct erofs_sb_info *sbi,
1379 			 enum erofs_mkfs_jobtype type, void *elem, int size)
1380 {
1381 	struct erofs_mkfs_jobitem *item;
1382 	struct erofs_mkfs_dfops *q = sbi->mkfs_dfops;
1383 
1384 	pthread_mutex_lock(&q->lock);
1385 
1386 	while (((q->tail + 1) & (q->entries - 1)) == q->head)
1387 		pthread_cond_wait(&q->full, &q->lock);
1388 
1389 	item = q->queue + q->tail;
1390 	item->type = type;
1391 	memcpy(&item->u, elem, size);
1392 	q->tail = (q->tail + 1) & (q->entries - 1);
1393 
1394 	pthread_cond_signal(&q->empty);
1395 	pthread_mutex_unlock(&q->lock);
1396 	return 0;
1397 }
1398 #else
1399 static int erofs_mkfs_go(struct erofs_sb_info *sbi,
1400 			 enum erofs_mkfs_jobtype type, void *elem, int size)
1401 {
1402 	struct erofs_mkfs_jobitem item;
1403 
1404 	item.type = type;
1405 	memcpy(&item.u, elem, size);
1406 	return erofs_mkfs_jobfn(&item);
1407 }
1408 static void erofs_mkfs_flushjobs(struct erofs_sb_info *sbi)
1409 {
1410 }
1411 #endif
1412 
1413 static int erofs_mkfs_handle_directory(struct erofs_inode *dir)
1414 {
1415 	struct erofs_sb_info *sbi = dir->sbi;
1416 	DIR *_dir;
1417 	struct dirent *dp;
1418 	struct erofs_dentry *d;
1419 	unsigned int nr_subdirs, i_nlink;
1420 	int ret;
1421 
1422 	_dir = opendir(dir->i_srcpath);
1423 	if (!_dir) {
1424 		erofs_err("failed to opendir at %s: %s",
1425 			  dir->i_srcpath, erofs_strerror(-errno));
1426 		return -errno;
1427 	}
1428 
1429 	nr_subdirs = 0;
1430 	i_nlink = 0;
1431 	while (1) {
1432 		char buf[PATH_MAX];
1433 		struct erofs_inode *inode;
1434 
1435 		/*
1436 		 * set errno to 0 before calling readdir() in order to
1437 		 * distinguish end of stream from an error.
1438 		 */
1439 		errno = 0;
1440 		dp = readdir(_dir);
1441 		if (!dp) {
1442 			if (!errno)
1443 				break;
1444 			ret = -errno;
1445 			goto err_closedir;
1446 		}
1447 
1448 		if (is_dot_dotdot(dp->d_name)) {
1449 			++i_nlink;
1450 			continue;
1451 		}
1452 
1453 		/* skip if it's an excluded file */
1454 		if (erofs_is_exclude_path(dir->i_srcpath, dp->d_name))
1455 			continue;
1456 
1457 		d = erofs_d_alloc(dir, dp->d_name);
1458 		if (IS_ERR(d)) {
1459 			ret = PTR_ERR(d);
1460 			goto err_closedir;
1461 		}
1462 
1463 		ret = snprintf(buf, PATH_MAX, "%s/%s", dir->i_srcpath, d->name);
1464 		if (ret < 0 || ret >= PATH_MAX)
1465 			goto err_closedir;
1466 
1467 		inode = erofs_iget_from_srcpath(sbi, buf);
1468 		if (IS_ERR(inode)) {
1469 			ret = PTR_ERR(inode);
1470 			goto err_closedir;
1471 		}
1472 		d->inode = inode;
1473 		d->type = erofs_mode_to_ftype(inode->i_mode);
1474 		i_nlink += S_ISDIR(inode->i_mode);
1475 		erofs_dbg("file %s added (type %u)", buf, d->type);
1476 		nr_subdirs++;
1477 	}
1478 	closedir(_dir);
1479 
1480 	ret = erofs_init_empty_dir(dir);
1481 	if (ret)
1482 		return ret;
1483 
1484 	ret = erofs_prepare_dir_file(dir, nr_subdirs + 2); /* sort subdirs */
1485 	if (ret)
1486 		return ret;
1487 
1488 	/*
1489 	 * If there are too many subdirs for the compact form, set nlink=1
1490 	 * rather than upgrading to the extended form.
1491 	 */
1492 	if (i_nlink > USHRT_MAX &&
1493 	    dir->inode_isize == sizeof(struct erofs_inode_compact))
1494 		dir->i_nlink = 1;
1495 	else
1496 		dir->i_nlink = i_nlink;
1497 
1498 	return erofs_mkfs_go(sbi, EROFS_MKFS_JOB_DIR, &dir, sizeof(dir));
1499 
1500 err_closedir:
1501 	closedir(_dir);
1502 	return ret;
1503 }
1504 
1505 int erofs_rebuild_load_basedir(struct erofs_inode *dir);
1506 
1507 bool erofs_dentry_is_wht(struct erofs_sb_info *sbi, struct erofs_dentry *d)
1508 {
1509 	if (!d->validnid)
1510 		return erofs_inode_is_whiteout(d->inode);
1511 	if (d->type == EROFS_FT_CHRDEV) {
1512 		struct erofs_inode ei = { .sbi = sbi, .nid = d->nid };
1513 		int ret;
1514 
1515 		ret = erofs_read_inode_from_disk(&ei);
1516 		if (ret) {
1517 			erofs_err("failed to check DT_WHT: %s",
1518 				  erofs_strerror(ret));
1519 			DBG_BUGON(1);
1520 			return false;
1521 		}
1522 		return erofs_inode_is_whiteout(&ei);
1523 	}
1524 	return false;
1525 }
1526 
1527 static int erofs_rebuild_handle_directory(struct erofs_inode *dir,
1528 					  bool incremental)
1529 {
1530 	struct erofs_sb_info *sbi = dir->sbi;
1531 	struct erofs_dentry *d, *n;
1532 	unsigned int nr_subdirs, i_nlink;
1533 	bool delwht = cfg.c_ovlfs_strip && dir->whiteouts;
1534 	int ret;
1535 
1536 	nr_subdirs = 0;
1537 	i_nlink = 0;
1538 
1539 	list_for_each_entry_safe(d, n, &dir->i_subdirs, d_child) {
1540 		if (delwht && erofs_dentry_is_wht(sbi, d)) {
1541 			erofs_dbg("remove whiteout %s", d->inode->i_srcpath);
1542 			list_del(&d->d_child);
1543 			erofs_d_invalidate(d);
1544 			free(d);
1545 			continue;
1546 		}
1547 		i_nlink += (d->type == EROFS_FT_DIR);
1548 		++nr_subdirs;
1549 	}
1550 
1551 	DBG_BUGON(i_nlink < 2);		/* should have `.` and `..` */
1552 	DBG_BUGON(nr_subdirs < i_nlink);
1553 	ret = erofs_prepare_dir_file(dir, nr_subdirs);
1554 	if (ret)
1555 		return ret;
1556 
1557 	if (IS_ROOT(dir) && incremental)
1558 		dir->datalayout = EROFS_INODE_FLAT_PLAIN;
1559 
1560 	/*
1561 	 * If there are too many subdirs for the compact form, set nlink=1
1562 	 * rather than upgrading to the extended form.
1563 	 */
1564 	if (i_nlink > USHRT_MAX &&
1565 	    dir->inode_isize == sizeof(struct erofs_inode_compact))
1566 		dir->i_nlink = 1;
1567 	else
1568 		dir->i_nlink = i_nlink;
1569 
1570 	return erofs_mkfs_go(sbi, EROFS_MKFS_JOB_DIR, &dir, sizeof(dir));
1571 }
1572 
1573 static int erofs_mkfs_handle_inode(struct erofs_inode *inode)
1574 {
1575 	const char *relpath = erofs_fspath(inode->i_srcpath);
1576 	char *trimmed;
1577 	int ret;
1578 
1579 	trimmed = erofs_trim_for_progressinfo(relpath[0] ? relpath : "/",
1580 					      sizeof("Processing  ...") - 1);
1581 	erofs_update_progressinfo("Processing %s ...", trimmed);
1582 	free(trimmed);
1583 
1584 	ret = erofs_scan_file_xattrs(inode);
1585 	if (ret < 0)
1586 		return ret;
1587 
1588 	ret = erofs_prepare_xattr_ibody(inode, false);
1589 	if (ret < 0)
1590 		return ret;
1591 
1592 	if (!S_ISDIR(inode->i_mode)) {
1593 		struct erofs_mkfs_job_ndir_ctx ctx = { .inode = inode };
1594 
1595 		if (!S_ISLNK(inode->i_mode) && inode->i_size) {
1596 			ctx.fd = open(inode->i_srcpath, O_RDONLY | O_BINARY);
1597 			if (ctx.fd < 0)
1598 				return -errno;
1599 
1600 			if (cfg.c_compr_opts[0].alg &&
1601 			    erofs_file_is_compressible(inode)) {
1602 				ctx.ictx = erofs_begin_compressed_file(inode,
1603 								ctx.fd, 0);
1604 				if (IS_ERR(ctx.ictx))
1605 					return PTR_ERR(ctx.ictx);
1606 			}
1607 		}
1608 		ret = erofs_mkfs_go(inode->sbi, EROFS_MKFS_JOB_NDIR,
1609 				    &ctx, sizeof(ctx));
1610 	} else {
1611 		ret = erofs_mkfs_handle_directory(inode);
1612 	}
1613 	erofs_info("file /%s dumped (mode %05o)", relpath, inode->i_mode);
1614 	return ret;
1615 }
1616 
1617 static int erofs_rebuild_handle_inode(struct erofs_inode *inode,
1618 				      bool incremental)
1619 {
1620 	char *trimmed;
1621 	int ret;
1622 
1623 	trimmed = erofs_trim_for_progressinfo(erofs_fspath(inode->i_srcpath),
1624 					      sizeof("Processing  ...") - 1);
1625 	erofs_update_progressinfo("Processing %s ...", trimmed);
1626 	free(trimmed);
1627 
1628 	if (erofs_should_use_inode_extended(inode)) {
1629 		if (cfg.c_force_inodeversion == FORCE_INODE_COMPACT) {
1630 			erofs_err("file %s cannot be in compact form",
1631 				  inode->i_srcpath);
1632 			return -EINVAL;
1633 		}
1634 		inode->inode_isize = sizeof(struct erofs_inode_extended);
1635 	} else {
1636 		inode->inode_isize = sizeof(struct erofs_inode_compact);
1637 	}
1638 
1639 	if (incremental && S_ISDIR(inode->i_mode) &&
1640 	    inode->dev == inode->sbi->dev && !inode->opaque) {
1641 		ret = erofs_rebuild_load_basedir(inode);
1642 		if (ret)
1643 			return ret;
1644 	}
1645 
1646 	/* strip all unnecessary overlayfs xattrs when ovlfs_strip is enabled */
1647 	if (cfg.c_ovlfs_strip)
1648 		erofs_clear_opaque_xattr(inode);
1649 	else if (inode->whiteouts)
1650 		erofs_set_origin_xattr(inode);
1651 
1652 	ret = erofs_prepare_xattr_ibody(inode, incremental && IS_ROOT(inode));
1653 	if (ret < 0)
1654 		return ret;
1655 
1656 	if (!S_ISDIR(inode->i_mode)) {
1657 		struct erofs_mkfs_job_ndir_ctx ctx =
1658 			{ .inode = inode, .fd = -1 };
1659 
1660 		if (S_ISREG(inode->i_mode) && inode->i_size &&
1661 		    inode->datasource == EROFS_INODE_DATA_SOURCE_DISKBUF) {
1662 			ctx.fd = erofs_diskbuf_getfd(inode->i_diskbuf, &ctx.fpos);
1663 			if (ctx.fd < 0)
1664 				return ret;
1665 
1666 			if (cfg.c_compr_opts[0].alg &&
1667 			    erofs_file_is_compressible(inode)) {
1668 				ctx.ictx = erofs_begin_compressed_file(inode,
1669 							ctx.fd, ctx.fpos);
1670 				if (IS_ERR(ctx.ictx))
1671 					return PTR_ERR(ctx.ictx);
1672 			}
1673 		}
1674 		ret = erofs_mkfs_go(inode->sbi, EROFS_MKFS_JOB_NDIR,
1675 				    &ctx, sizeof(ctx));
1676 	} else {
1677 		ret = erofs_rebuild_handle_directory(inode, incremental);
1678 	}
1679 	erofs_info("file %s dumped (mode %05o)", erofs_fspath(inode->i_srcpath),
1680 		   inode->i_mode);
1681 	return ret;
1682 }
1683 
1684 static bool erofs_inode_visited(struct erofs_inode *inode)
1685 {
1686 	return (unsigned long)inode->i_parent & 1UL;
1687 }
1688 
1689 static void erofs_mark_parent_inode(struct erofs_inode *inode,
1690 				    struct erofs_inode *dir)
1691 {
1692 	inode->i_parent = (void *)((unsigned long)dir | 1);
1693 }
1694 
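/*
 * Breadth-first dump of the in-memory tree: each directory is queued as
 * a DIR job, its entries are handled, newly found subdirectories are
 * chained through ->next_dirwrite, and the DIR_BH job that actually
 * writes the dirent blocks is queued once every entry of that directory
 * has been processed.  The low bit of ->i_parent doubles as a "visited"
 * mark so that extra references to an already-seen inode are counted as
 * hard links.
 */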
1695 static int erofs_mkfs_dump_tree(struct erofs_inode *root, bool rebuild,
1696 				bool incremental)
1697 {
1698 	struct erofs_sb_info *sbi = root->sbi;
1699 	struct erofs_inode *dumpdir = erofs_igrab(root);
1700 	int err;
1701 
1702 	erofs_mark_parent_inode(root, root);	/* rootdir mark */
1703 	root->next_dirwrite = NULL;
1704 	/* update dev/i_ino[1] to keep track of the base image */
1705 	if (incremental) {
1706 		root->dev = root->sbi->dev;
1707 		root->i_ino[1] = sbi->root_nid;
1708 		list_del(&root->i_hash);
1709 		erofs_insert_ihash(root);
1710 	} else if (cfg.c_root_xattr_isize) {
1711 		root->xattr_isize = cfg.c_root_xattr_isize;
1712 	}
1713 
1714 	err = !rebuild ? erofs_mkfs_handle_inode(root) :
1715 			erofs_rebuild_handle_inode(root, incremental);
1716 	if (err)
1717 		return err;
1718 
1719 	/* assign root NID immediately for non-incremental builds */
1720 	if (!incremental) {
1721 		erofs_mkfs_flushjobs(sbi);
1722 		erofs_fixup_meta_blkaddr(root);
1723 		sbi->root_nid = root->nid;
1724 	}
1725 
1726 	do {
1727 		int err;
1728 		struct erofs_inode *dir = dumpdir;
1729 		/* used for adding sub-directories in reverse order due to FIFO */
1730 		struct erofs_inode *head, **last = &head;
1731 		struct erofs_dentry *d;
1732 
1733 		dumpdir = dir->next_dirwrite;
1734 		list_for_each_entry(d, &dir->i_subdirs, d_child) {
1735 			struct erofs_inode *inode = d->inode;
1736 
1737 			if (is_dot_dotdot(d->name) || d->validnid)
1738 				continue;
1739 
1740 			if (!erofs_inode_visited(inode)) {
1741 				DBG_BUGON(rebuild &&
1742 					  erofs_parent_inode(inode) != dir);
1743 				erofs_mark_parent_inode(inode, dir);
1744 
1745 				if (!rebuild)
1746 					err = erofs_mkfs_handle_inode(inode);
1747 				else
1748 					err = erofs_rebuild_handle_inode(inode,
1749 								incremental);
1750 				if (err)
1751 					break;
1752 				if (S_ISDIR(inode->i_mode)) {
1753 					*last = inode;
1754 					last = &inode->next_dirwrite;
1755 					(void)erofs_igrab(inode);
1756 				}
1757 			} else if (!rebuild) {
1758 				++inode->i_nlink;
1759 			}
1760 		}
1761 		*last = dumpdir;	/* fixup the last (or the only) one */
1762 		dumpdir = head;
1763 		err = erofs_mkfs_go(sbi, EROFS_MKFS_JOB_DIR_BH,
1764 				    &dir, sizeof(dir));
1765 		if (err)
1766 			return err;
1767 	} while (dumpdir);
1768 
1769 	return err;
1770 }
1771 
1772 struct erofs_mkfs_buildtree_ctx {
1773 	struct erofs_sb_info *sbi;
1774 	union {
1775 		const char *path;
1776 		struct erofs_inode *root;
1777 	} u;
1778 	bool incremental;
1779 };
1780 #ifndef EROFS_MT_ENABLED
1781 #define __erofs_mkfs_build_tree erofs_mkfs_build_tree
1782 #endif
1783 
1784 static int __erofs_mkfs_build_tree(struct erofs_mkfs_buildtree_ctx *ctx)
1785 {
1786 	bool from_path = !!ctx->sbi;
1787 	struct erofs_inode *root;
1788 	int err;
1789 
1790 	if (from_path) {
1791 		root = erofs_iget_from_srcpath(ctx->sbi, ctx->u.path);
1792 		if (IS_ERR(root))
1793 			return PTR_ERR(root);
1794 	} else {
1795 		root = ctx->u.root;
1796 	}
1797 
1798 	err = erofs_mkfs_dump_tree(root, !from_path, ctx->incremental);
1799 	if (err) {
1800 		if (from_path)
1801 			erofs_iput(root);
1802 		return err;
1803 	}
1804 	ctx->u.root = root;
1805 	return 0;
1806 }
1807 
1808 #ifdef EROFS_MT_ENABLED
1809 static int erofs_mkfs_build_tree(struct erofs_mkfs_buildtree_ctx *ctx)
1810 {
1811 	struct erofs_mkfs_dfops *q;
1812 	int err, err2;
1813 	struct erofs_sb_info *sbi = ctx->sbi ? ctx->sbi : ctx->u.root->sbi;
1814 
1815 	q = malloc(sizeof(*q));
1816 	if (!q)
1817 		return -ENOMEM;
1818 
1819 	q->entries = EROFS_MT_QUEUE_SIZE;
1820 	q->queue = malloc(q->entries * sizeof(*q->queue));
1821 	if (!q->queue) {
1822 		free(q);
1823 		return -ENOMEM;
1824 	}
1825 	pthread_mutex_init(&q->lock, NULL);
1826 	pthread_cond_init(&q->empty, NULL);
1827 	pthread_cond_init(&q->full, NULL);
1828 	pthread_cond_init(&q->drain, NULL);
1829 
1830 	q->head = 0;
1831 	q->tail = 0;
1832 	sbi->mkfs_dfops = q;
1833 	err = pthread_create(&sbi->dfops_worker, NULL,
1834 			     z_erofs_mt_dfops_worker, sbi);
1835 	if (err)
1836 		goto fail;
1837 
1838 	err = __erofs_mkfs_build_tree(ctx);
1839 	erofs_mkfs_go(sbi, ~0, NULL, 0);
1840 	err2 = pthread_join(sbi->dfops_worker, NULL);
1841 	if (!err)
1842 		err = err2;
1843 
1844 fail:
1845 	pthread_cond_destroy(&q->empty);
1846 	pthread_cond_destroy(&q->full);
1847 	pthread_cond_destroy(&q->drain);
1848 	pthread_mutex_destroy(&q->lock);
1849 	free(q->queue);
1850 	free(q);
1851 	return err;
1852 }
1853 #endif
1854 
1855 struct erofs_inode *erofs_mkfs_build_tree_from_path(struct erofs_sb_info *sbi,
1856 						    const char *path)
1857 {
1858 	struct erofs_mkfs_buildtree_ctx ctx = {
1859 		.sbi = sbi,
1860 		.u.path = path,
1861 	};
1862 	int err;
1863 
1864 	if (!sbi)
1865 		return ERR_PTR(-EINVAL);
1866 	err = erofs_mkfs_build_tree(&ctx);
1867 	if (err)
1868 		return ERR_PTR(err);
1869 	return ctx.u.root;
1870 }
1871 
1872 int erofs_rebuild_dump_tree(struct erofs_inode *root, bool incremental)
1873 {
1874 	return erofs_mkfs_build_tree(&((struct erofs_mkfs_buildtree_ctx) {
1875 		.sbi = NULL,
1876 		.u.root = root,
1877 		.incremental = incremental,
1878 	}));
1879 }
1880 
1881 struct erofs_inode *erofs_mkfs_build_special_from_fd(struct erofs_sb_info *sbi,
1882 						     int fd, const char *name)
1883 {
1884 	struct stat st;
1885 	struct erofs_inode *inode;
1886 	void *ictx;
1887 	int ret;
1888 
1889 	ret = lseek(fd, 0, SEEK_SET);
1890 	if (ret < 0)
1891 		return ERR_PTR(-errno);
1892 
1893 	ret = fstat(fd, &st);
1894 	if (ret)
1895 		return ERR_PTR(-errno);
1896 
1897 	inode = erofs_new_inode(sbi);
1898 	if (IS_ERR(inode))
1899 		return inode;
1900 
1901 	if (name == EROFS_PACKED_INODE) {
1902 		st.st_uid = st.st_gid = 0;
1903 		st.st_nlink = 0;
1904 	}
1905 
1906 	ret = erofs_fill_inode(inode, &st, name);
1907 	if (ret) {
1908 		free(inode);
1909 		return ERR_PTR(ret);
1910 	}
1911 
1912 	if (name == EROFS_PACKED_INODE) {
1913 		inode->sbi->packed_nid = EROFS_PACKED_NID_UNALLOCATED;
1914 		inode->nid = inode->sbi->packed_nid;
1915 	}
1916 
1917 	if (cfg.c_compr_opts[0].alg &&
1918 	    erofs_file_is_compressible(inode)) {
1919 		ictx = erofs_begin_compressed_file(inode, fd, 0);
1920 		if (IS_ERR(ictx))
1921 			return ERR_CAST(ictx);
1922 
1923 		DBG_BUGON(!ictx);
1924 		ret = erofs_write_compressed_file(ictx);
1925 		if (ret && ret != -ENOSPC)
1926 			 return ERR_PTR(ret);
1927 
1928 		ret = lseek(fd, 0, SEEK_SET);
1929 		if (ret < 0)
1930 			return ERR_PTR(-errno);
1931 	}
1932 	ret = write_uncompressed_file_from_fd(inode, fd);
1933 	if (ret)
1934 		return ERR_PTR(ret);
1935 	erofs_prepare_inode_buffer(inode);
1936 	erofs_write_tail_end(inode);
1937 	return inode;
1938 }
1939 
1940 int erofs_fixup_root_inode(struct erofs_inode *root)
1941 {
1942 	struct erofs_sb_info *sbi = root->sbi;
1943 	struct erofs_inode oi;
1944 	unsigned int ondisk_capacity, ondisk_size;
1945 	char *ibuf;
1946 	int err;
1947 
1948 	if (sbi->root_nid == root->nid)		/* for most mkfs cases */
1949 		return 0;
1950 
1951 	if (root->nid <= 0xffff) {
1952 		sbi->root_nid = root->nid;
1953 		return 0;
1954 	}
1955 
1956 	oi = (struct erofs_inode){ .sbi = sbi, .nid = sbi->root_nid };
1957 	err = erofs_read_inode_from_disk(&oi);
1958 	if (err) {
1959 		erofs_err("failed to read root inode: %s",
1960 			  erofs_strerror(err));
1961 		return err;
1962 	}
1963 
1964 	if (oi.datalayout != EROFS_INODE_FLAT_INLINE &&
1965 	    oi.datalayout != EROFS_INODE_FLAT_PLAIN)
1966 		return -EOPNOTSUPP;
1967 
1968 	ondisk_capacity = oi.inode_isize + oi.xattr_isize;
1969 	if (oi.datalayout == EROFS_INODE_FLAT_INLINE)
1970 		ondisk_capacity += erofs_blkoff(sbi, oi.i_size);
1971 
1972 	ondisk_size = root->inode_isize + root->xattr_isize;
1973 	if (root->extent_isize)
1974 		ondisk_size = roundup(ondisk_size, 8) + root->extent_isize;
1975 	ondisk_size += root->idata_size;
1976 
1977 	if (ondisk_size > ondisk_capacity) {
1978 		erofs_err("no enough room for the root inode from nid %llu",
1979 			  root->nid);
1980 		return -ENOSPC;
1981 	}
1982 
1983 	ibuf = malloc(ondisk_size);
1984 	if (!ibuf)
1985 		return -ENOMEM;
1986 	err = erofs_dev_read(sbi, 0, ibuf, erofs_iloc(root), ondisk_size);
1987 	if (err >= 0)
1988 		err = erofs_dev_write(sbi, ibuf, erofs_iloc(&oi), ondisk_size);
1989 	free(ibuf);
1990 	return err;
1991 }
1992 
1993 struct erofs_inode *erofs_rebuild_make_root(struct erofs_sb_info *sbi)
1994 {
1995 	struct erofs_inode *root;
1996 
1997 	root = erofs_new_inode(sbi);
1998 	if (IS_ERR(root))
1999 		return root;
2000 	root->i_srcpath = strdup("/");
2001 	root->i_mode = S_IFDIR | 0777;
2002 	root->i_parent = root;
2003 	root->i_mtime = root->sbi->build_time;
2004 	root->i_mtime_nsec = root->sbi->build_time_nsec;
2005 	erofs_init_empty_dir(root);
2006 	return root;
2007 }
2008