// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * erofs-utils/lib/blobchunk.c
 *
 * Copyright (C) 2021, Alibaba Cloud
 */
#define _GNU_SOURCE
#include "erofs/hashmap.h"
#include "erofs/blobchunk.h"
#include "erofs/block_list.h"
#include "erofs/cache.h"
#include "sha256.h"
#include <unistd.h>

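/*
 * In-memory chunk descriptor.  Deduplicated chunks live in blob_hashmap
 * (keyed by their SHA-256), while unhashed chunks sit on the
 * unhashed_blobchunks list; the leading union reflects that a chunk is
 * tracked by exactly one of the two structures at a time.
 */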
struct erofs_blobchunk {
	union {
		struct hashmap_entry ent;
		struct list_head list;
	};
	char sha256[32];
	unsigned int device_id;
	union {
		erofs_off_t chunksize;
		erofs_off_t sourceoffset;
	};
	erofs_blk_t blkaddr;
};

static struct hashmap blob_hashmap;
static FILE *blobfile;
static erofs_blk_t remapped_base;
static erofs_off_t datablob_size;
static bool multidev;
static struct erofs_buffer_head *bh_devt;
struct erofs_blobchunk erofs_holechunk = {
	.blkaddr = EROFS_NULL_ADDR,
};
static LIST_HEAD(unhashed_blobchunks);

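/*
 * Allocate a chunk that bypasses deduplication entirely: it records a
 * fixed (device_id, blkaddr, sourceoffset) mapping and is only kept on
 * unhashed_blobchunks so that erofs_blob_exit() can free it.
 */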
struct erofs_blobchunk *erofs_get_unhashed_chunk(unsigned int device_id,
		erofs_blk_t blkaddr, erofs_off_t sourceoffset)
{
	struct erofs_blobchunk *chunk;

	chunk = calloc(1, sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->device_id = device_id;
	chunk->blkaddr = blkaddr;
	chunk->sourceoffset = sourceoffset;
	list_add_tail(&chunk->list, &unhashed_blobchunks);
	return chunk;
}

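/*
 * Look up or create a deduplicated chunk.  The payload is hashed with
 * SHA-256; on a hit the existing chunk (or the shared hole chunk for
 * all-zero data) is reused and the saved bytes are accounted, otherwise
 * the data is appended to blobfile, padded to a block boundary, and a
 * new entry is added to blob_hashmap.
 */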
static struct erofs_blobchunk *erofs_blob_getchunk(struct erofs_sb_info *sbi,
						   u8 *buf, erofs_off_t chunksize)
{
	static u8 zeroed[EROFS_MAX_BLOCK_SIZE];
	struct erofs_blobchunk *chunk;
	unsigned int hash, padding;
	u8 sha256[32];
	erofs_off_t blkpos;
	int ret;

	erofs_sha256(buf, chunksize, sha256);
	hash = memhash(sha256, sizeof(sha256));
	chunk = hashmap_get_from_hash(&blob_hashmap, hash, sha256);
	if (chunk) {
		DBG_BUGON(chunksize != chunk->chunksize);

		sbi->saved_by_deduplication += chunksize;
		if (chunk->blkaddr == erofs_holechunk.blkaddr) {
			chunk = &erofs_holechunk;
			erofs_dbg("Found duplicated hole chunk");
		} else {
			erofs_dbg("Found duplicated chunk at %u",
				  chunk->blkaddr);
		}
		return chunk;
	}

	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	chunk->chunksize = chunksize;
	memcpy(chunk->sha256, sha256, sizeof(sha256));
	blkpos = ftell(blobfile);
	DBG_BUGON(erofs_blkoff(sbi, blkpos));

	if (sbi->extra_devices)
		chunk->device_id = 1;
	else
		chunk->device_id = 0;
	chunk->blkaddr = erofs_blknr(sbi, blkpos);

	erofs_dbg("Writing chunk (%llu bytes) to %u",
		  (unsigned long long)chunksize, chunk->blkaddr);
	ret = fwrite(buf, chunksize, 1, blobfile);
	if (ret == 1) {
		padding = erofs_blkoff(sbi, chunksize);
		if (padding) {
			padding = erofs_blksiz(sbi) - padding;
			ret = fwrite(zeroed, padding, 1, blobfile);
		}
	}

	if (ret < 1) {
		free(chunk);
		return ERR_PTR(-ENOSPC);
	}

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return chunk;
}

static int erofs_blob_hashmap_cmp(const void *a, const void *b,
				  const void *key)
{
	const struct erofs_blobchunk *ec1 =
		container_of((struct hashmap_entry *)a,
			     struct erofs_blobchunk, ent);
	const struct erofs_blobchunk *ec2 =
		container_of((struct hashmap_entry *)b,
			     struct erofs_blobchunk, ent);

	return memcmp(ec1->sha256, key ? key : ec2->sha256,
		      sizeof(ec1->sha256));
}

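/*
 * Convert the in-memory chunk pointers of @inode into the on-disk index
 * format in place (either full erofs_inode_chunk_index entries or 4-byte
 * block-map entries) and write them out at offset @off.  Physically
 * contiguous chunks are coalesced into extents for the block lists.
 */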
int erofs_blob_write_chunk_indexes(struct erofs_inode *inode,
				   erofs_off_t off)
{
	struct erofs_inode_chunk_index idx = {0};
	erofs_blk_t extent_start = EROFS_NULL_ADDR;
	erofs_blk_t extent_end, chunkblks;
	erofs_off_t source_offset;
	unsigned int dst, src, unit;
	bool first_extent = true;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkblks = 1U << (inode->u.chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	for (dst = src = 0; dst < inode->extent_isize;
	     src += sizeof(void *), dst += unit) {
		struct erofs_blobchunk *chunk;

		chunk = *(void **)(inode->chunkindexes + src);

		if (chunk->blkaddr == EROFS_NULL_ADDR) {
			idx.blkaddr = EROFS_NULL_ADDR;
		} else if (chunk->device_id) {
			DBG_BUGON(!(inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES));
			idx.blkaddr = chunk->blkaddr;
			extent_start = EROFS_NULL_ADDR;
		} else {
			idx.blkaddr = remapped_base + chunk->blkaddr;
		}

		if (extent_start == EROFS_NULL_ADDR ||
		    idx.blkaddr != extent_end) {
			if (extent_start != EROFS_NULL_ADDR) {
				tarerofs_blocklist_write(extent_start,
						extent_end - extent_start,
						source_offset);
				erofs_droid_blocklist_write_extent(inode,
						extent_start,
						extent_end - extent_start,
						first_extent, false);
				first_extent = false;
			}
			extent_start = idx.blkaddr;
			source_offset = chunk->sourceoffset;
		}
		extent_end = idx.blkaddr + chunkblks;
		idx.device_id = cpu_to_le16(chunk->device_id);
		idx.blkaddr = cpu_to_le32(idx.blkaddr);

		if (unit == EROFS_BLOCK_MAP_ENTRY_SIZE)
			memcpy(inode->chunkindexes + dst, &idx.blkaddr, unit);
		else
			memcpy(inode->chunkindexes + dst, &idx, sizeof(idx));
	}
	off = roundup(off, unit);
	if (extent_start != EROFS_NULL_ADDR)
		tarerofs_blocklist_write(extent_start, extent_end - extent_start,
					 source_offset);
	erofs_droid_blocklist_write_extent(inode, extent_start,
					   extent_start == EROFS_NULL_ADDR ?
					   0 : extent_end - extent_start,
					   first_extent, true);

	return erofs_dev_write(inode->sbi, inode->chunkindexes, off,
			       inode->extent_isize);
}

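/*
 * Merge adjacent chunks into larger ones by keeping only every
 * 1U << (new_chunkbits - chunkbits) in-memory chunk pointer; callers are
 * expected to have verified (via minextblks) that the dropped neighbours
 * are physically contiguous.  The final chunk size lands in chunkformat.
 */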
int erofs_blob_mergechunks(struct erofs_inode *inode, unsigned int chunkbits,
			   unsigned int new_chunkbits)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int dst, src, unit, count;

	if (new_chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		new_chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	if (chunkbits >= new_chunkbits)		/* no need to merge */
		goto out;

	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	count = round_up(inode->i_size, 1ULL << new_chunkbits) >> new_chunkbits;
	for (dst = src = 0; dst < count; ++dst) {
		*((void **)inode->chunkindexes + dst) =
			*((void **)inode->chunkindexes + src);
		src += 1U << (new_chunkbits - chunkbits);
	}

	DBG_BUGON(count * unit >= inode->extent_isize);
	inode->extent_isize = count * unit;
	chunkbits = new_chunkbits;
out:
	inode->u.chunkformat = (chunkbits - sbi->blkszbits) |
		(inode->u.chunkformat & ~EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	return 0;
}

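/*
 * Record the largest power-of-two block count that evenly divides the
 * interval [start, end); the running minimum across all intervals later
 * becomes the merged chunk size in erofs_blob_write_chunked_file().
 */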
static void erofs_update_minextblks(struct erofs_sb_info *sbi,
				    erofs_off_t start, erofs_off_t end,
				    erofs_blk_t *minextblks)
{
	erofs_blk_t lb;

	lb = lowbit((end - start) >> sbi->blkszbits);
	if (lb && lb < *minextblks)
		*minextblks = lb;
}
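
/*
 * Two chunks can share one merge interval if they are both holes or if
 * they are back-to-back within the same device, i.e. the previous chunk
 * ends exactly where the next one begins.
 */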
static bool erofs_blob_can_merge(struct erofs_sb_info *sbi,
				 struct erofs_blobchunk *lastch,
				 struct erofs_blobchunk *chunk)
{
	if (!lastch)
		return true;
	if (lastch == &erofs_holechunk && chunk == &erofs_holechunk)
		return true;
	if (lastch->device_id == chunk->device_id &&
	    erofs_pos(sbi, lastch->blkaddr) + lastch->chunksize ==
	    erofs_pos(sbi, chunk->blkaddr))
		return true;

	return false;
}
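
/*
 * Split a regular file into fixed-size chunks, deduplicating each chunk
 * through erofs_blob_getchunk().  Where SEEK_DATA is available, holes
 * are detected up front and mapped to the shared hole chunk without
 * reading them; afterwards the chunk size is grown again via
 * erofs_blob_mergechunks() up to the largest size every contiguous run
 * allows.
 */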
int erofs_blob_write_chunked_file(struct erofs_inode *inode, int fd,
				  erofs_off_t startoff)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = cfg.c_chunkbits;
	unsigned int count, unit;
	struct erofs_blobchunk *chunk, *lastch;
	struct erofs_inode_chunk_index *idx;
	erofs_off_t pos, len, chunksize, interval_start;
	erofs_blk_t minextblks;
	u8 *chunkdata;
	int ret;

#ifdef SEEK_DATA
	/* if the file is fully sparse, use one big chunk instead */
	if (lseek(fd, startoff, SEEK_DATA) < 0 && errno == ENXIO) {
		chunkbits = ilog2(inode->i_size - 1) + 1;
		if (chunkbits < sbi->blkszbits)
			chunkbits = sbi->blkszbits;
	}
#endif
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	if (sbi->extra_devices)
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
	if (inode->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(struct erofs_inode_chunk_index);
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;

	chunkdata = malloc(chunksize);
	if (!chunkdata)
		return -ENOMEM;

	inode->extent_isize = count * unit;
	inode->chunkindexes = malloc(count * max(sizeof(*idx), sizeof(void *)));
	if (!inode->chunkindexes) {
		ret = -ENOMEM;
		goto err;
	}
	idx = inode->chunkindexes;
	lastch = NULL;
	minextblks = BLK_ROUND_UP(sbi, inode->i_size);
	interval_start = 0;

	for (pos = 0; pos < inode->i_size; pos += len) {
#ifdef SEEK_DATA
		off_t offset = lseek(fd, pos + startoff, SEEK_DATA);

		if (offset < 0) {
			if (errno != ENXIO)
				offset = pos;
			else
				offset = ((pos >> chunkbits) + 1) << chunkbits;
		} else {
			offset -= startoff;

			if (offset != (offset & ~(chunksize - 1))) {
				offset &= ~(chunksize - 1);
				if (lseek(fd, offset + startoff, SEEK_SET) !=
					  startoff + offset) {
					ret = -EIO;
					goto err;
				}
			}
		}

		if (offset > pos) {
			if (!erofs_blob_can_merge(sbi, lastch,
						  &erofs_holechunk)) {
				erofs_update_minextblks(sbi, interval_start,
							pos, &minextblks);
				interval_start = pos;
			}
			do {
				*(void **)idx++ = &erofs_holechunk;
				pos += chunksize;
			} while (pos < offset);
			DBG_BUGON(pos != offset);
			lastch = &erofs_holechunk;
			len = 0;
			continue;
		}
#endif

		len = min_t(u64, inode->i_size - pos, chunksize);
		ret = read(fd, chunkdata, len);
		if (ret < 0 || ret < len) {
			ret = -EIO;
			goto err;
		}

		chunk = erofs_blob_getchunk(sbi, chunkdata, len);
		if (IS_ERR(chunk)) {
			ret = PTR_ERR(chunk);
			goto err;
		}

		if (!erofs_blob_can_merge(sbi, lastch, chunk)) {
			erofs_update_minextblks(sbi, interval_start, pos,
						&minextblks);
			interval_start = pos;
		}
		*(void **)idx++ = chunk;
		lastch = chunk;
	}
	erofs_update_minextblks(sbi, interval_start, pos, &minextblks);
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	free(chunkdata);
	return erofs_blob_mergechunks(inode, chunkbits,
				      ilog2(minextblks) + sbi->blkszbits);
err:
	free(inode->chunkindexes);
	inode->chunkindexes = NULL;
	free(chunkdata);
	return ret;
}

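/*
 * Describe an all-zero inode as chunk-based data: every index points at
 * an unhashed chunk with EROFS_NULL_ADDR, so no blob space is consumed.
 */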
int erofs_write_zero_inode(struct erofs_inode *inode)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
	unsigned int count;
	erofs_off_t chunksize, len, pos;
	struct erofs_inode_chunk_index *idx;

	if (chunkbits < sbi->blkszbits)
		chunkbits = sbi->blkszbits;
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;

	inode->u.chunkformat |= chunkbits - sbi->blkszbits;

	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	inode->extent_isize = count * EROFS_BLOCK_MAP_ENTRY_SIZE;
	idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
	if (!idx)
		return -ENOMEM;
	inode->chunkindexes = idx;

	for (pos = 0; pos < inode->i_size; pos += len) {
		struct erofs_blobchunk *chunk;

		len = min_t(erofs_off_t, inode->i_size - pos, chunksize);
		chunk = erofs_get_unhashed_chunk(0, EROFS_NULL_ADDR, -1);
		if (IS_ERR(chunk)) {
			free(inode->chunkindexes);
			inode->chunkindexes = NULL;
			return PTR_ERR(chunk);
		}

		*(void **)idx++ = chunk;
	}
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	return 0;
}

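/*
 * Map a tar data payload that already sits in the data blob (or on an
 * external device) as a chunk-based inode: each chunk plainly addresses
 * the existing data starting at @data_offset, so nothing is copied or
 * deduplicated.
 */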
int tarerofs_write_chunkes(struct erofs_inode *inode, erofs_off_t data_offset)
{
	struct erofs_sb_info *sbi = inode->sbi;
	unsigned int chunkbits = ilog2(inode->i_size - 1) + 1;
	unsigned int count, unit, device_id;
	erofs_off_t chunksize, len, pos;
	erofs_blk_t blkaddr;
	struct erofs_inode_chunk_index *idx;

	if (chunkbits < sbi->blkszbits)
		chunkbits = sbi->blkszbits;
	if (chunkbits - sbi->blkszbits > EROFS_CHUNK_FORMAT_BLKBITS_MASK)
		chunkbits = EROFS_CHUNK_FORMAT_BLKBITS_MASK + sbi->blkszbits;

	inode->u.chunkformat |= chunkbits - sbi->blkszbits;
	if (sbi->extra_devices) {
		device_id = 1;
		inode->u.chunkformat |= EROFS_CHUNK_FORMAT_INDEXES;
		unit = sizeof(struct erofs_inode_chunk_index);
		DBG_BUGON(erofs_blkoff(sbi, data_offset));
		blkaddr = erofs_blknr(sbi, data_offset);
	} else {
		device_id = 0;
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;
		DBG_BUGON(erofs_blkoff(sbi, datablob_size));
		blkaddr = erofs_blknr(sbi, datablob_size);
		datablob_size += round_up(inode->i_size, erofs_blksiz(sbi));
	}
	chunksize = 1ULL << chunkbits;
	count = DIV_ROUND_UP(inode->i_size, chunksize);

	inode->extent_isize = count * unit;
	idx = calloc(count, max(sizeof(*idx), sizeof(void *)));
	if (!idx)
		return -ENOMEM;
	inode->chunkindexes = idx;

	for (pos = 0; pos < inode->i_size; pos += len) {
		struct erofs_blobchunk *chunk;

		len = min_t(erofs_off_t, inode->i_size - pos, chunksize);

		chunk = erofs_get_unhashed_chunk(device_id, blkaddr,
						 data_offset);
		if (IS_ERR(chunk)) {
			free(inode->chunkindexes);
			inode->chunkindexes = NULL;
			return PTR_ERR(chunk);
		}

		*(void **)idx++ = chunk;
		blkaddr += erofs_blknr(sbi, len);
		data_offset += len;
	}
	inode->datalayout = EROFS_INODE_CHUNK_BASED;
	return 0;
}

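/*
 * Flush the staged blob data into the final image.  With extra devices,
 * only the device table (block counts and mapped ranges per device) is
 * emitted; otherwise the blob file is copied verbatim into a DATA area
 * of the primary device and remapped_base records where it landed.
 */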
int erofs_mkfs_dump_blobs(struct erofs_sb_info *sbi)
{
	struct erofs_buffer_head *bh;
	ssize_t length;
	u64 pos_in, pos_out;
	ssize_t ret;

	if (blobfile) {
		fflush(blobfile);
		length = ftell(blobfile);
		if (length < 0)
			return -errno;

		if (sbi->extra_devices)
			sbi->devs[0].blocks = erofs_blknr(sbi, length);
		else
			datablob_size = length;
	}

	if (sbi->extra_devices) {
		unsigned int i;
		int err;
		erofs_blk_t nblocks;

		nblocks = erofs_mapbh(sbi->bmgr, NULL);
		pos_out = erofs_btell(bh_devt, false);
		i = 0;
		do {
			struct erofs_deviceslot dis = {
				.mapped_blkaddr = cpu_to_le32(nblocks),
				.blocks = cpu_to_le32(sbi->devs[i].blocks),
			};

			memcpy(dis.tag, sbi->devs[i].tag, sizeof(dis.tag));
			err = erofs_dev_write(sbi, &dis, pos_out, sizeof(dis));
			if (err)
				return err;
			pos_out += sizeof(dis);
			nblocks += sbi->devs[i].blocks;
		} while (++i < sbi->extra_devices);
		bh_devt->op = &erofs_drop_directly_bhops;
		erofs_bdrop(bh_devt, false);
		return 0;
	}

	bh = erofs_balloc(sbi->bmgr, DATA, datablob_size, 0, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	erofs_mapbh(NULL, bh->block);

	pos_out = erofs_btell(bh, false);
	remapped_base = erofs_blknr(sbi, pos_out);
	pos_out += sbi->bdev.offset;
	if (blobfile) {
		pos_in = 0;
		ret = erofs_copy_file_range(fileno(blobfile), &pos_in,
					    sbi->bdev.fd, &pos_out,
					    datablob_size);
		ret = ret < (ssize_t)datablob_size ? -EIO : 0;
	} else {
		ret = erofs_io_ftruncate(&sbi->bdev, pos_out + datablob_size);
	}
	bh->op = &erofs_drop_directly_bhops;
	erofs_bdrop(bh, false);
	return ret;
}

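/*
 * Tear down all global blob state: close the staging file, free every
 * deduplicated chunk still in blob_hashmap, and release the unhashed
 * chunk list.
 */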
void erofs_blob_exit(void)
{
	struct hashmap_iter iter;
	struct hashmap_entry *e;
	struct erofs_blobchunk *bc, *n;

	if (blobfile)
		fclose(blobfile);

	/*
	 * Disable hashmap shrink, effectively disabling rehash.
	 * This way we can iterate over the entire hashmap efficiently
	 * and safely by using hashmap_iter_next().
	 */
	hashmap_disable_shrink(&blob_hashmap);
	e = hashmap_iter_first(&blob_hashmap, &iter);
	while (e) {
		bc = container_of((struct hashmap_entry *)e,
				  struct erofs_blobchunk, ent);
		DBG_BUGON(hashmap_remove(&blob_hashmap, e) != e);
		free(bc);
		e = hashmap_iter_next(&iter);
	}
	DBG_BUGON(hashmap_free(&blob_hashmap));

	list_for_each_entry_safe(bc, n, &unhashed_blobchunks, list) {
		list_del(&bc->list);
		free(bc);
	}
}

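/*
 * Seed blob_hashmap with one all-zero chunk whose blkaddr is the hole
 * address, so that any later zero-filled chunk deduplicates into a hole
 * instead of being written out.
 */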
static int erofs_insert_zerochunk(erofs_off_t chunksize)
{
	u8 *zeros;
	struct erofs_blobchunk *chunk;
	u8 sha256[32];
	unsigned int hash;

	zeros = calloc(1, chunksize);
	if (!zeros)
		return -ENOMEM;

	erofs_sha256(zeros, chunksize, sha256);
	free(zeros);
	hash = memhash(sha256, sizeof(sha256));
	chunk = malloc(sizeof(struct erofs_blobchunk));
	if (!chunk)
		return -ENOMEM;

	chunk->chunksize = chunksize;
	/* treat chunks filled with zeros as holes */
	chunk->blkaddr = erofs_holechunk.blkaddr;
	memcpy(chunk->sha256, sha256, sizeof(sha256));

	hashmap_entry_init(&chunk->ent, hash);
	hashmap_add(&blob_hashmap, chunk);
	return 0;
}

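/*
 * Create the blob staging file (an anonymous tmpfile unless a blob path
 * is given for multi-device images) and seed the dedupe hashmap with a
 * zero chunk.  A minimal sketch of the expected call order (assuming an
 * already-populated inode and open fd; not the literal mkfs code path):
 *
 *	err = erofs_blob_init(NULL, 1ULL << cfg.c_chunkbits);
 *	if (!err)
 *		err = erofs_blob_write_chunked_file(inode, fd, 0);
 *	if (!err)
 *		err = erofs_mkfs_dump_blobs(sbi);
 *	erofs_blob_exit();
 */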
int erofs_blob_init(const char *blobfile_path, erofs_off_t chunksize)
{
	if (!blobfile_path) {
#ifdef HAVE_TMPFILE64
		blobfile = tmpfile64();
#else
		blobfile = tmpfile();
#endif
		multidev = false;
	} else {
		blobfile = fopen(blobfile_path, "wb");
		multidev = true;
	}
	if (!blobfile)
		return -EACCES;

	hashmap_init(&blob_hashmap, erofs_blob_hashmap_cmp, 0);
	return erofs_insert_zerochunk(chunksize);
}

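/*
 * Reserve a DEVT area for @devices extra device slots and record its
 * slot offset in the superblock info; the slots themselves are filled
 * in later by erofs_mkfs_dump_blobs().
 */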
int erofs_mkfs_init_devices(struct erofs_sb_info *sbi, unsigned int devices)
{
	if (!devices)
		return 0;

	sbi->devs = calloc(devices, sizeof(sbi->devs[0]));
	if (!sbi->devs)
		return -ENOMEM;

	bh_devt = erofs_balloc(sbi->bmgr, DEVT,
			       sizeof(struct erofs_deviceslot) * devices, 0, 0);
	if (IS_ERR(bh_devt)) {
		free(sbi->devs);
		return PTR_ERR(bh_devt);
	}
	erofs_mapbh(NULL, bh_devt->block);
	bh_devt->op = &erofs_skip_write_bhops;
	sbi->devt_slotoff = erofs_btell(bh_devt, false) / EROFS_DEVT_SLOT_SIZE;
	sbi->extra_devices = devices;
	erofs_sb_set_device_table(sbi);
	return 0;
}