/**
 * segment.c
 *
 * Many parts of the code are copied from the Linux kernel fs/f2fs.
 *
 * Copyright (C) 2015 Huawei Ltd.
 * Written by:
 *   Hou Pengyang <[email protected]>
 *   Liu Shuoran <[email protected]>
 *   Jaegeuk Kim <[email protected]>
 * Copyright (c) 2020 Google Inc.
 *   Robin Hsu <[email protected]>
 *  : add sload compression support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"
#include "node.h"
#include "quotaio.h"

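/*
 * Allocate one free block of the given curseg @type and return its address
 * through @to. Updates the segment entry, the SIT/main bitmaps (under fsck)
 * and the SSA summary for the new block; when *to was NULL_ADDR, the global
 * valid block/node/inode counters are bumped as well. Returns 0 on success
 * or -ENOSPC when the image is full.
 */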
int reserve_new_block(struct f2fs_sb_info *sbi, block_t *to,
			struct f2fs_summary *sum, int type, bool is_inode)
{
	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
	struct seg_entry *se;
	u64 blkaddr, offset;
	u64 old_blkaddr = *to;
	bool is_node = IS_NODESEG(type);
	int left = 0;

	if (old_blkaddr == NULL_ADDR) {
		if (c.func == FSCK) {
			if (fsck->chk.valid_blk_cnt >= sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && fsck->chk.valid_node_cnt >=
					sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		} else {
			if (sbi->total_valid_block_count >=
					sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				return -ENOSPC;
			}
			if (is_node && sbi->total_valid_node_count >=
					sbi->total_node_count) {
				ERR_MSG("Not enough space for node block\n");
				return -ENOSPC;
			}
		}
	}

	blkaddr = SM_I(sbi)->main_blkaddr;

	if (le32_to_cpu(sbi->raw_super->feature) & F2FS_FEATURE_RO) {
		if (IS_NODESEG(type)) {
			type = CURSEG_HOT_NODE;
			blkaddr = __end_block_addr(sbi);
			left = 1;
		} else if (IS_DATASEG(type)) {
			type = CURSEG_HOT_DATA;
			blkaddr = SM_I(sbi)->main_blkaddr;
			left = 0;
		}
	}

	if (find_next_free_block(sbi, &blkaddr, left, type, false)) {
		ERR_MSG("Can't find free block");
		ASSERT(0);
	}

	se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
	offset = OFFSET_IN_SEG(sbi, blkaddr);
	se->type = se->orig_type = type;
	if (se->valid_blocks == 0)
		SM_I(sbi)->free_segments--;
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi)) {
		se->ckpt_type = type;
		se->ckpt_valid_blocks++;
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	}
	if (c.func == FSCK) {
		f2fs_set_main_bitmap(sbi, blkaddr, type);
		f2fs_set_sit_bitmap(sbi, blkaddr);
	}

	if (old_blkaddr == NULL_ADDR) {
		sbi->total_valid_block_count++;
		if (is_node) {
			sbi->total_valid_node_count++;
			if (is_inode)
				sbi->total_valid_inode_count++;
		}
		if (c.func == FSCK) {
			fsck->chk.valid_blk_cnt++;
			if (is_node) {
				fsck->chk.valid_nat_entry_cnt++;
				fsck->chk.valid_node_cnt++;
				if (is_inode)
					fsck->chk.valid_inode_cnt++;
			}
		}
	}
	se->dirty = 1;

	/* read/write SSA */
	*to = (block_t)blkaddr;
	update_sum_entry(sbi, *to, sum);

	return 0;
}

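/*
 * Reserve a new data block for @dn->ofs_in_node, record its address in the
 * dnode, and zero the caller's @block buffer. With the RO feature enabled,
 * all data is allocated as CURSEG_HOT_DATA. Sets the inode dirty flag when
 * the block accounting changes.
 */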
int new_data_block(struct f2fs_sb_info *sbi, void *block,
			struct dnode_of_data *dn, int type)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_summary sum;
	struct node_info ni;
	unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
	int ret;

	if ((get_sb(feature) & F2FS_FEATURE_RO) &&
			type != CURSEG_HOT_DATA)
		type = CURSEG_HOT_DATA;

	ASSERT(dn->node_blk);
	memset(block, 0, F2FS_BLKSIZE);

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	dn->data_blkaddr = blkaddr;
	ret = reserve_new_block(sbi, &dn->data_blkaddr, &sum, type, 0);
	if (ret) {
		c.alloc_failed = 1;
		return ret;
	}

	if (blkaddr == NULL_ADDR)
		inc_inode_blocks(dn);
	else if (blkaddr == NEW_ADDR)
		dn->idirty = 1;
	set_data_blkaddr(dn);
	return 0;
}

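/* Return the on-disk i_size of the quota file described by @qf. */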
u64 f2fs_quota_size(struct quota_file *qf)
{
	struct node_info ni;
	struct f2fs_node *inode;
	u64 filesize;

	inode = (struct f2fs_node *) calloc(F2FS_BLKSIZE, 1);
	ASSERT(inode);

	/* Read inode */
	get_node_info(qf->sbi, qf->ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(S_ISREG(le16_to_cpu(inode->i.i_mode)));

	filesize = le64_to_cpu(inode->i.i_size);
	free(inode);
	return filesize;
}

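/*
 * Read up to @count bytes at byte @offset from inode @ino into @buffer.
 * The read is clamped to i_size and stops early at a hole (NULL_ADDR or
 * NEW_ADDR). Returns the number of bytes actually read.
 */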
u64 f2fs_read(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
			u64 count, pgoff_t offset)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	u64 filesize;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 read_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(F2FS_BLKSIZE, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node *)(blk_buffer + F2FS_BLKSIZE);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Adjust count with file length. */
	filesize = le64_to_cpu(inode->i.i_size);
	if (offset > filesize)
		count = 0;
	else if (count + offset > filesize)
		count = filesize - offset;

	/* Main loop for file blocks */
	read_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, F2FS_BYTES_TO_BLK(offset),
					LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
			break;

		off_in_blk = offset % F2FS_BLKSIZE;
		len_in_blk = F2FS_BLKSIZE - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Read data from single block. */
		if (len_in_blk < F2FS_BLKSIZE) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(buffer, blk_buffer + off_in_blk, len_in_blk);
		} else {
			/* Direct read */
			ASSERT(dev_read_block(buffer, blkaddr) >= 0);
		}

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		read_count += len_in_blk;

		dn.ofs_in_node++;
		remained_blkentries--;
	}
	if (index_node)
		free(index_node);
	free(blk_buffer);

	return read_count;
}

/*
 * Do not call this function directly. Instead, call one of the following:
 * u64 f2fs_write();
 * u64 f2fs_write_compress_data();
 * u64 f2fs_write_addrtag();
 */
static u64 f2fs_write_ex(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset, enum wr_addr_type addr_type)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	char *blk_buffer;
	void *wbuf;
	u64 off_in_blk;
	u64 len_in_blk;
	u64 written_count;
	u64 remained_blkentries;
	block_t blkaddr;
	void *index_node = NULL;
	int idirty = 0;
	int err, ret;
	bool datablk_alloced = false;
	bool has_data = (addr_type == WR_NORMAL
			|| addr_type == WR_COMPRESS_DATA);

	if (count == 0)
		return 0;

	/*
	 * Enforce calling from f2fs_write(), f2fs_write_compress_data(),
	 * and f2fs_write_addrtag(). Besides, check that it is properly called.
	 */
	ASSERT((!has_data && buffer == NULL) || (has_data && buffer != NULL));
	if (addr_type != WR_NORMAL)
		ASSERT(offset % F2FS_BLKSIZE == 0); /* block boundary only */

	/* Memory allocation for block buffer and inode. */
	blk_buffer = calloc(F2FS_BLKSIZE, 2);
	ASSERT(blk_buffer);
	inode = (struct f2fs_node *)(blk_buffer + F2FS_BLKSIZE);

	/* Read inode */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	/* Main loop for file blocks */
	written_count = remained_blkentries = 0;
	while (count > 0) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			err = get_dnode_of_data(sbi, &dn,
					F2FS_BYTES_TO_BLK(offset), ALLOC_NODE);
			if (err)
				break;
			idirty |= dn.idirty;
			free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk) -
					dn.ofs_in_node;
		}
		ASSERT(remained_blkentries > 0);

		if (!has_data) {
			struct seg_entry *se;

			se = get_seg_entry(sbi, GET_SEGNO(sbi, dn.node_blkaddr));
			dn.data_blkaddr = addr_type;
			set_data_blkaddr(&dn);
			idirty |= dn.idirty;
			if (dn.ndirty) {
				ret = dn.alloced ? dev_write_block(dn.node_blk,
						dn.node_blkaddr,
						f2fs_io_type_to_rw_hint(se->type)) :
					update_block(sbi, dn.node_blk,
						&dn.node_blkaddr, NULL);
				ASSERT(ret >= 0);
			}
			written_count = 0;
			break;
		}

		datablk_alloced = false;
		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
			err = new_data_block(sbi, blk_buffer,
						&dn, CURSEG_WARM_DATA);
			if (err)
				break;
			blkaddr = dn.data_blkaddr;
			idirty |= dn.idirty;
			datablk_alloced = true;
		}

		off_in_blk = offset % F2FS_BLKSIZE;
		len_in_blk = F2FS_BLKSIZE - off_in_blk;
		if (len_in_blk > count)
			len_in_blk = count;

		/* Write data to single block. */
		if (len_in_blk < F2FS_BLKSIZE) {
			ASSERT(dev_read_block(blk_buffer, blkaddr) >= 0);
			memcpy(blk_buffer + off_in_blk, buffer, len_in_blk);
			wbuf = blk_buffer;
		} else {
			/* Direct write */
			wbuf = buffer;
		}

		if (c.zoned_model == F2FS_ZONED_HM) {
			if (datablk_alloced) {
				ret = dev_write_block(wbuf, blkaddr,
					f2fs_io_type_to_rw_hint(CURSEG_WARM_DATA));
			} else {
				ret = update_block(sbi, wbuf, &blkaddr,
						dn.node_blk);
				if (dn.inode_blk == dn.node_blk)
					idirty = 1;
				else
					dn.ndirty = 1;
			}
		} else {
			ret = dev_write_block(wbuf, blkaddr,
					f2fs_io_type_to_rw_hint(CURSEG_WARM_DATA));
		}
		ASSERT(ret >= 0);

		offset += len_in_blk;
		count -= len_in_blk;
		buffer += len_in_blk;
		written_count += len_in_blk;

		dn.ofs_in_node++;
		if ((--remained_blkentries == 0 || count == 0) && (dn.ndirty)) {
			struct seg_entry *se;
			se = get_seg_entry(sbi, GET_SEGNO(sbi, dn.node_blkaddr));
			ret = dn.alloced ?
				dev_write_block(dn.node_blk, dn.node_blkaddr,
					f2fs_io_type_to_rw_hint(se->type)) :
				update_block(sbi, dn.node_blk, &dn.node_blkaddr, NULL);
			ASSERT(ret >= 0);
		}
	}

	if (addr_type == WR_NORMAL && offset > le64_to_cpu(inode->i.i_size)) {
		inode->i.i_size = cpu_to_le64(offset);
		idirty = 1;
	}
	if (idirty) {
		get_node_info(sbi, ino, &ni);
		ASSERT(inode == dn.inode_blk);
		ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
	}

	free(index_node);
	free(blk_buffer);

	return written_count;
}

u64 f2fs_write(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_NORMAL);
}

u64 f2fs_write_compress_data(struct f2fs_sb_info *sbi, nid_t ino, u8 *buffer,
		u64 count, pgoff_t offset)
{
	return f2fs_write_ex(sbi, ino, buffer, count, offset, WR_COMPRESS_DATA);
}

u64 f2fs_write_addrtag(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int addrtag)
{
	ASSERT(addrtag == COMPRESS_ADDR || addrtag == NEW_ADDR
			|| addrtag == NULL_ADDR);
	return f2fs_write_ex(sbi, ino, NULL, F2FS_BLKSIZE, offset, addrtag);
}

/* This function updates only inode->i.i_size */
void f2fs_filesize_update(struct f2fs_sb_info *sbi, nid_t ino, u64 filesize)
{
	struct node_info ni;
	struct f2fs_node *inode;

	inode = calloc(F2FS_BLKSIZE, 1);
	ASSERT(inode);
	get_node_info(sbi, ino, &ni);

	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	ASSERT(!S_ISDIR(le16_to_cpu(inode->i.i_mode)));
	ASSERT(!S_ISLNK(le16_to_cpu(inode->i.i_mode)));

	inode->i.i_size = cpu_to_le64(filesize);

	ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
	free(inode);
}

#define MAX_BULKR_RETRY 5
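/*
 * read(2) wrapper that keeps reading until @rsize bytes are consumed or
 * EOF is hit, retrying up to MAX_BULKR_RETRY times on EINTR. Returns the
 * number of bytes read, or -1 on error; *eof is set when EOF ended the read.
 */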
int bulkread(int fd, void *rbuf, size_t rsize, bool *eof)
{
	int n = 0;
	int retry = MAX_BULKR_RETRY;
	int cur;

	if (!rsize)
		return 0;

	if (eof != NULL)
		*eof = false;
	while (rsize && (cur = read(fd, rbuf, rsize)) != 0) {
		if (cur == -1) {
			if (errno == EINTR && retry--)
				continue;
			return -1;
		}
		retry = MAX_BULKR_RETRY;

		rbuf += cur;	/* advance past the bytes already read */
		rsize -= cur;
		n += cur;
	}
	if (eof != NULL)
		*eof = (cur == 0);
	return n;
}

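/*
 * For a mutable (non-readonly) compressed cluster starting at byte @offset,
 * tag the remaining @compressed - 1 block slots with NEW_ADDR so the saved
 * blocks stay reserved. Returns 0 on success, or the failing write length.
 */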
u64 f2fs_fix_mutable(struct f2fs_sb_info *sbi, nid_t ino, pgoff_t offset,
		unsigned int compressed)
{
	unsigned int i;
	u64 wlen;

	if (c.compress.readonly)
		return 0;

	for (i = 0; i < compressed - 1; i++) {
		wlen = f2fs_write_addrtag(sbi, ino,
				offset + (i << F2FS_BLKSIZE_BITS), NEW_ADDR);
		if (wlen)
			return wlen;
	}
	return 0;
}

static inline int is_consecutive(u32 prev_addr, u32 cur_addr)
{
	if (is_valid_data_blkaddr(cur_addr) && (cur_addr == prev_addr + 1))
		return 1;
	return 0;
}

static inline void copy_extent_info(struct extent_info *t_ext,
		struct extent_info *s_ext)
{
	t_ext->fofs = s_ext->fofs;
	t_ext->blk = s_ext->blk;
	t_ext->len = s_ext->len;
}

static inline void update_extent_info(struct f2fs_node *inode,
		struct extent_info *ext)
{
	inode->i.i_ext.fofs = cpu_to_le32(ext->fofs);
	inode->i.i_ext.blk_addr = cpu_to_le32(ext->blk);
	inode->i.i_ext.len = cpu_to_le32(ext->len);
}

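/*
 * Walk every block address of inode @ino, find the longest run of
 * physically consecutive blocks, and store it as the inode's largest
 * extent hint (i_ext). Compressed clusters (COMPRESS_ADDR) are stepped
 * over a whole cluster at a time.
 */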
static void update_largest_extent(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct dnode_of_data dn;
	struct node_info ni;
	struct f2fs_node *inode;
	u32 blkaddr, prev_blkaddr, cur_blk = 0, end_blk;
	struct extent_info largest_ext = { 0, }, cur_ext = { 0, };
	u64 remained_blkentries = 0;
	u32 cluster_size;
	int count;
	void *index_node = NULL;

	memset(&dn, 0, sizeof(dn));
	largest_ext.len = cur_ext.len = 0;

	inode = (struct f2fs_node *) calloc(F2FS_BLKSIZE, 1);
	ASSERT(inode);

	/* Read inode info */
	get_node_info(sbi, ino, &ni);
	ASSERT(dev_read_block(inode, ni.blk_addr) >= 0);
	cluster_size = 1 << inode->i.i_log_cluster_size;

	if (inode->i.i_inline & F2FS_INLINE_DATA)
		goto exit;

	end_blk = f2fs_max_file_offset(&inode->i) >> F2FS_BLKSIZE_BITS;

	while (cur_blk <= end_blk) {
		if (remained_blkentries == 0) {
			set_new_dnode(&dn, inode, NULL, ino);
			get_dnode_of_data(sbi, &dn, cur_blk, LOOKUP_NODE);
			if (index_node)
				free(index_node);
			index_node = (dn.node_blk == dn.inode_blk) ?
					NULL : dn.node_blk;
			remained_blkentries = ADDRS_PER_PAGE(sbi,
					dn.node_blk, dn.inode_blk);
		}
		ASSERT(remained_blkentries > 0);

		blkaddr = datablock_addr(dn.node_blk, dn.ofs_in_node);
		if (cur_ext.len > 0) {
			if (is_consecutive(prev_blkaddr, blkaddr))
				cur_ext.len++;
			else {
				if (cur_ext.len > largest_ext.len)
					copy_extent_info(&largest_ext,
							&cur_ext);
				cur_ext.len = 0;
			}
		}

		if (cur_ext.len == 0 && is_valid_data_blkaddr(blkaddr)) {
			cur_ext.fofs = cur_blk;
			cur_ext.len = 1;
			cur_ext.blk = blkaddr;
		}

		prev_blkaddr = blkaddr;
		count = blkaddr == COMPRESS_ADDR ? cluster_size : 1;
		cur_blk += count;
		dn.ofs_in_node += count;
		remained_blkentries -= count;
	}

exit:
	if (cur_ext.len > largest_ext.len)
		copy_extent_info(&largest_ext, &cur_ext);
	if (largest_ext.len > 0) {
		update_extent_info(inode, &largest_ext);
		ASSERT(update_inode(sbi, inode, &ni.blk_addr) >= 0);
	}

	if (index_node)
		free(index_node);
	free(inode);
}

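/*
 * Load one host file described by @de into the image: already-built hard
 * links are skipped, files no larger than DEF_MAX_INLINE_DATA are inlined
 * into the inode, compression-enabled sload builds write compressed
 * clusters, and everything else is written out block by block.
 */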
int f2fs_build_file(struct f2fs_sb_info *sbi, struct dentry *de)
{
	int fd, n = -1;
	pgoff_t off = 0;
	u8 buffer[F2FS_BLKSIZE];
	struct node_info ni;
	struct f2fs_node *node_blk;

	if (de->ino == 0)
		return -1;

	if (de->from_devino) {
		struct hardlink_cache_entry *found_hardlink;

		found_hardlink = f2fs_search_hardlink(sbi, de);
		if (found_hardlink && found_hardlink->to_ino &&
				found_hardlink->nbuild)
			return 0;

		found_hardlink->nbuild++;
	}

	fd = open(de->full_path, O_RDONLY);
	if (fd < 0) {
		MSG(0, "Skip: Fail to open %s\n", de->full_path);
		return -1;
	}

	/* inline_data support */
	if (de->size <= DEF_MAX_INLINE_DATA) {
		int ret;

		get_node_info(sbi, de->ino, &ni);

		node_blk = calloc(F2FS_BLKSIZE, 1);
		ASSERT(node_blk);

		ret = dev_read_block(node_blk, ni.blk_addr);
		ASSERT(ret >= 0);

		node_blk->i.i_inline |= F2FS_INLINE_DATA;
		node_blk->i.i_inline |= F2FS_DATA_EXIST;

		if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
			node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
			node_blk->i.i_extra_isize =
					cpu_to_le16(calc_extra_isize());
		}
		n = read(fd, buffer, F2FS_BLKSIZE);
		ASSERT((unsigned long)n == de->size);
		memcpy(inline_data_addr(node_blk), buffer, de->size);
		node_blk->i.i_size = cpu_to_le64(de->size);
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);
		free(node_blk);
#ifdef WITH_SLOAD
	} else if (c.func == SLOAD && c.compress.enabled &&
			c.compress.filter_ops->filter(de->full_path)) {
		bool eof = false;
		u8 *rbuf = c.compress.cc.rbuf;
		unsigned int cblocks = 0;

		node_blk = calloc(F2FS_BLKSIZE, 1);
		ASSERT(node_blk);

		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_compress_algorithm = c.compress.alg;
		node_blk->i.i_log_cluster_size =
				c.compress.cc.log_cluster_size;
		node_blk->i.i_flags = cpu_to_le32(F2FS_COMPR_FL);
		if (c.compress.readonly)
			node_blk->i.i_inline |= F2FS_COMPRESS_RELEASED;
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);

		while (!eof && (n = bulkread(fd, rbuf, c.compress.cc.rlen,
				&eof)) > 0) {
			int ret = c.compress.ops->compress(&c.compress.cc);
			u64 wlen;
			u32 csize = ALIGN_UP(c.compress.cc.clen +
					COMPRESS_HEADER_SIZE, F2FS_BLKSIZE);
			unsigned int cur_cblk;

			if (ret || n < c.compress.cc.rlen ||
					n < (int)(csize + F2FS_BLKSIZE *
							c.compress.min_blocks)) {
				wlen = f2fs_write(sbi, de->ino, rbuf, n, off);
				ASSERT((int)wlen == n);
			} else {
				wlen = f2fs_write_addrtag(sbi, de->ino, off,
						WR_COMPRESS_ADDR);
				ASSERT(!wlen);
				wlen = f2fs_write_compress_data(sbi, de->ino,
						(u8 *)c.compress.cc.cbuf,
						csize, off + F2FS_BLKSIZE);
				ASSERT(wlen == csize);
				c.compress.ops->reset(&c.compress.cc);
				cur_cblk = (c.compress.cc.rlen - csize) /
						F2FS_BLKSIZE;
				cblocks += cur_cblk;
				wlen = f2fs_fix_mutable(sbi, de->ino,
						off + F2FS_BLKSIZE + csize,
						cur_cblk);
				ASSERT(!wlen);
			}
			off += n;
		}
		if (n == -1) {
			fprintf(stderr, "Load file '%s' failed: ",
					de->full_path);
			perror(NULL);
		}
		/* read inode */
		get_node_info(sbi, de->ino, &ni);
		ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
		/* update inode meta */
		node_blk->i.i_size = cpu_to_le64(off);
		if (!c.compress.readonly) {
			node_blk->i.i_compr_blocks = cpu_to_le64(cblocks);
			node_blk->i.i_blocks += cpu_to_le64(cblocks);
		}
		ASSERT(update_inode(sbi, node_blk, &ni.blk_addr) >= 0);
		free(node_blk);

		if (!c.compress.readonly) {
			sbi->total_valid_block_count += cblocks;
			if (sbi->total_valid_block_count >=
					sbi->user_block_count) {
				ERR_MSG("Not enough space\n");
				ASSERT(0);
			}
		}
#endif
	} else {
		while ((n = read(fd, buffer, F2FS_BLKSIZE)) > 0) {
			f2fs_write(sbi, de->ino, buffer, n, off);
			off += n;
		}
	}

	close(fd);
	if (n < 0)
		return -1;

	if (!c.compress.enabled || (c.feature & F2FS_FEATURE_RO))
		update_largest_extent(sbi, de->ino);
	update_free_segments(sbi);

	MSG(1, "Info: Create %s -> %s\n"
		"  -- ino=%x, type=%x, mode=%x, uid=%x, "
		"gid=%x, cap=%"PRIx64", size=%lu, pino=%x\n",
		de->full_path, de->path,
		de->ino, de->file_type, de->mode,
		de->uid, de->gid, de->capabilities, de->size, de->pino);
	return 0;
}

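/*
 * Rewrite @buf for the block at *blkaddr. On block-mapped devices this is
 * an in-place write; on zoned (F2FS_ZONED_HM) devices the block is
 * relocated instead: the old SIT/SSA state is cleared, a new block is
 * allocated, and the owning node or NAT entry is re-pointed at it.
 */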
int update_block(struct f2fs_sb_info *sbi, void *buf, u32 *blkaddr,
		struct f2fs_node *node_blk)
{
	struct seg_entry *se;
	struct f2fs_summary sum;
	u64 new_blkaddr, old_blkaddr = *blkaddr, offset;
	int ret, type;

	if (c.zoned_model != F2FS_ZONED_HM)
		return dev_write_block(buf, old_blkaddr, WRITE_LIFE_NONE);

	/* update sit bitmap & valid_blocks && se->type for old block */
	se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
	offset = OFFSET_IN_SEG(sbi, old_blkaddr);
	type = se->type;
	se->valid_blocks--;
	f2fs_clear_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi))
		f2fs_clear_bit(offset, (char *)se->ckpt_valid_map);
	se->dirty = 1;
	f2fs_clear_main_bitmap(sbi, old_blkaddr);
	f2fs_clear_sit_bitmap(sbi, old_blkaddr);

	new_blkaddr = SM_I(sbi)->main_blkaddr;
	if (find_next_free_block(sbi, &new_blkaddr, 0, type, false)) {
		ERR_MSG("Can't find free block for the update");
		ASSERT(0);
	}

	ret = dev_write_block(buf, new_blkaddr, f2fs_io_type_to_rw_hint(type));
	ASSERT(ret >= 0);

	*blkaddr = new_blkaddr;

	/* update sit bitmap & valid_blocks && se->type for new block */
	se = get_seg_entry(sbi, GET_SEGNO(sbi, new_blkaddr));
	offset = OFFSET_IN_SEG(sbi, new_blkaddr);
	se->type = se->orig_type = type;
	se->valid_blocks++;
	f2fs_set_bit(offset, (char *)se->cur_valid_map);
	if (need_fsync_data_record(sbi))
		f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
	se->dirty = 1;
	f2fs_set_main_bitmap(sbi, new_blkaddr, type);
	f2fs_set_sit_bitmap(sbi, new_blkaddr);

	/* update SSA */
	get_sum_entry(sbi, old_blkaddr, &sum);
	update_sum_entry(sbi, new_blkaddr, &sum);

	if (IS_DATASEG(type)) {
		update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
				le16_to_cpu(sum.ofs_in_node), new_blkaddr, node_blk);
	} else
		update_nat_blkaddr(sbi, 0, le32_to_cpu(sum.nid), new_blkaddr);

	DBG(1, "Update %s block %"PRIx64" -> %"PRIx64"\n",
			IS_DATASEG(type) ? "data" : "node", old_blkaddr, new_blkaddr);
	return ret;
}