1 /*
2 * Unsquash a squashfs filesystem. This is a highly compressed read only
3 * filesystem.
4 *
5 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
6 * 2012, 2013, 2014
7 * Phillip Lougher <[email protected]>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2,
12 * or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * unsquashfs.c
24 */
25
26 #include "unsquashfs.h"
27 #include "squashfs_swap.h"
28 #include "squashfs_compat.h"
29 #include "compressor.h"
30 #include "xattr.h"
31 #include "unsquashfs_info.h"
32 #include "stdarg.h"
33
34 #ifndef linux
35 #include <sys/sysctl.h>
36 #else
37 #include <sys/sysinfo.h>
38 #endif
39
40 #include <sys/sysmacros.h>
41 #include <sys/types.h>
42 #include <sys/time.h>
43 #include <sys/resource.h>
44 #include <limits.h>
45 #include <ctype.h>
46
47 struct cache *fragment_cache, *data_cache;
48 struct queue *to_reader, *to_inflate, *to_writer, *from_writer;
49 pthread_t *thread, *inflator_thread;
50 pthread_mutex_t fragment_mutex;
51
52 /* user options that control parallelisation */
53 int processors = -1;
54
55 struct super_block sBlk;
56 squashfs_operations s_ops;
57 struct compressor *comp;
58
59 int bytes = 0, swap, file_count = 0, dir_count = 0, sym_count = 0,
60 dev_count = 0, fifo_count = 0;
61 char *inode_table = NULL, *directory_table = NULL;
62 struct hash_table_entry *inode_table_hash[65536], *directory_table_hash[65536];
63 int fd;
64 unsigned int *uid_table, *guid_table;
65 unsigned int cached_frag = SQUASHFS_INVALID_FRAG;
66 char *fragment_data;
67 char *file_data;
68 char *data;
69 unsigned int block_size;
70 unsigned int block_log;
71 int lsonly = FALSE, info = FALSE, force = FALSE, short_ls = TRUE;
72 int use_regex = FALSE;
73 char **created_inode;
74 int root_process;
75 int columns;
76 int rotate = 0;
77 pthread_mutex_t screen_mutex;
78 int progress = TRUE, progress_enabled = FALSE;
79 unsigned int total_blocks = 0, total_files = 0, total_inodes = 0;
80 unsigned int cur_blocks = 0;
81 int inode_number = 1;
82 int no_xattrs = XATTR_DEF;
83 int user_xattrs = FALSE;
84
85 int lookup_type[] = {
86 0,
87 S_IFDIR,
88 S_IFREG,
89 S_IFLNK,
90 S_IFBLK,
91 S_IFCHR,
92 S_IFIFO,
93 S_IFSOCK,
94 S_IFDIR,
95 S_IFREG,
96 S_IFLNK,
97 S_IFBLK,
98 S_IFCHR,
99 S_IFIFO,
100 S_IFSOCK
101 };
102
103 struct test table[] = {
104 { S_IFMT, S_IFSOCK, 0, 's' },
105 { S_IFMT, S_IFLNK, 0, 'l' },
106 { S_IFMT, S_IFBLK, 0, 'b' },
107 { S_IFMT, S_IFDIR, 0, 'd' },
108 { S_IFMT, S_IFCHR, 0, 'c' },
109 { S_IFMT, S_IFIFO, 0, 'p' },
110 { S_IRUSR, S_IRUSR, 1, 'r' },
111 { S_IWUSR, S_IWUSR, 2, 'w' },
112 { S_IRGRP, S_IRGRP, 4, 'r' },
113 { S_IWGRP, S_IWGRP, 5, 'w' },
114 { S_IROTH, S_IROTH, 7, 'r' },
115 { S_IWOTH, S_IWOTH, 8, 'w' },
116 { S_IXUSR | S_ISUID, S_IXUSR | S_ISUID, 3, 's' },
117 { S_IXUSR | S_ISUID, S_ISUID, 3, 'S' },
118 { S_IXUSR | S_ISUID, S_IXUSR, 3, 'x' },
119 { S_IXGRP | S_ISGID, S_IXGRP | S_ISGID, 6, 's' },
120 { S_IXGRP | S_ISGID, S_ISGID, 6, 'S' },
121 { S_IXGRP | S_ISGID, S_IXGRP, 6, 'x' },
122 { S_IXOTH | S_ISVTX, S_IXOTH | S_ISVTX, 9, 't' },
123 { S_IXOTH | S_ISVTX, S_ISVTX, 9, 'T' },
124 { S_IXOTH | S_ISVTX, S_IXOTH, 9, 'x' },
125 { 0, 0, 0, 0}
126 };
127
128 void progress_bar(long long current, long long max, int columns);
129
130 #define MAX_LINE 16384
131
132 void prep_exit()
133 {
134 }
135
136
137 void sigwinch_handler()
138 {
139 struct winsize winsize;
140
141 if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
142 if(isatty(STDOUT_FILENO))
143 ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
144 "columns\n");
145 columns = 80;
146 } else
147 columns = winsize.ws_col;
148 }
149
150
151 void sigalrm_handler()
152 {
153 rotate = (rotate + 1) % 4;
154 }
155
156
157 int add_overflow(int a, int b)
158 {
159 return (INT_MAX - a) < b;
160 }
161
162
163 int shift_overflow(int a, int shift)
164 {
165 return (INT_MAX >> shift) < a;
166 }
167
168
169 int multiply_overflow(int a, int multiplier)
170 {
171 return (INT_MAX / multiplier) < a;
172 }
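
/*
 * Illustrative note (added, not in the original source): these overflow
 * helpers guard integer arithmetic before it is used to size an
 * allocation.  A typical call pattern, mirroring queue_init() below, is:
 *
 *	if(add_overflow(size, 1) ||
 *				multiply_overflow(size + 1, sizeof(void *)))
 *		EXIT_UNSQUASH("Size too large\n");
 *	data = malloc(sizeof(void *) * (size + 1));
 *
 * i.e. check that size + 1 and (size + 1) * sizeof(void *) both fit in an
 * int before doing the arithmetic for real.
 */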
173
174
175 struct queue *queue_init(int size)
176 {
177 struct queue *queue = malloc(sizeof(struct queue));
178
179 if(queue == NULL)
180 EXIT_UNSQUASH("Out of memory in queue_init\n");
181
182 if(add_overflow(size, 1) ||
183 multiply_overflow(size + 1, sizeof(void *)))
184 EXIT_UNSQUASH("Size too large in queue_init\n");
185
186 queue->data = malloc(sizeof(void *) * (size + 1));
187 if(queue->data == NULL)
188 EXIT_UNSQUASH("Out of memory in queue_init\n");
189
190 queue->size = size + 1;
191 queue->readp = queue->writep = 0;
192 pthread_mutex_init(&queue->mutex, NULL);
193 pthread_cond_init(&queue->empty, NULL);
194 pthread_cond_init(&queue->full, NULL);
195
196 return queue;
197 }
198
199
200 void queue_put(struct queue *queue, void *data)
201 {
202 int nextp;
203
204 pthread_mutex_lock(&queue->mutex);
205
206 while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
207 pthread_cond_wait(&queue->full, &queue->mutex);
208
209 queue->data[queue->writep] = data;
210 queue->writep = nextp;
211 pthread_cond_signal(&queue->empty);
212 pthread_mutex_unlock(&queue->mutex);
213 }
214
215
216 void *queue_get(struct queue *queue)
217 {
218 void *data;
219 pthread_mutex_lock(&queue->mutex);
220
221 while(queue->readp == queue->writep)
222 pthread_cond_wait(&queue->empty, &queue->mutex);
223
224 data = queue->data[queue->readp];
225 queue->readp = (queue->readp + 1) % queue->size;
226 pthread_cond_signal(&queue->full);
227 pthread_mutex_unlock(&queue->mutex);
228
229 return data;
230 }
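
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * queue above is a blocking producer/consumer channel between the reader,
 * inflate and writer threads:
 *
 *	struct queue *q = queue_init(8);
 *
 *	queue_put(q, entry);	// blocks while the queue is full
 *	entry = queue_get(q);	// blocks while the queue is empty
 *
 * size + 1 slots are allocated so that a full queue (writep one behind
 * readp) can be distinguished from an empty one (readp == writep).
 */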
231
232
233 void dump_queue(struct queue *queue)
234 {
235 pthread_mutex_lock(&queue->mutex);
236
237 printf("Max size %d, size %d%s\n", queue->size - 1,
238 queue->readp <= queue->writep ? queue->writep - queue->readp :
239 queue->size - queue->readp + queue->writep,
240 queue->readp == queue->writep ? " (EMPTY)" :
241 ((queue->writep + 1) % queue->size) == queue->readp ?
242 " (FULL)" : "");
243
244 pthread_mutex_unlock(&queue->mutex);
245 }
246
247
248 /* Called with the cache mutex held */
249 void insert_hash_table(struct cache *cache, struct cache_entry *entry)
250 {
251 int hash = CALCULATE_HASH(entry->block);
252
253 entry->hash_next = cache->hash_table[hash];
254 cache->hash_table[hash] = entry;
255 entry->hash_prev = NULL;
256 if(entry->hash_next)
257 entry->hash_next->hash_prev = entry;
258 }
259
260
261 /* Called with the cache mutex held */
262 void remove_hash_table(struct cache *cache, struct cache_entry *entry)
263 {
264 if(entry->hash_prev)
265 entry->hash_prev->hash_next = entry->hash_next;
266 else
267 cache->hash_table[CALCULATE_HASH(entry->block)] =
268 entry->hash_next;
269 if(entry->hash_next)
270 entry->hash_next->hash_prev = entry->hash_prev;
271
272 entry->hash_prev = entry->hash_next = NULL;
273 }
274
275
276 /* Called with the cache mutex held */
277 void insert_free_list(struct cache *cache, struct cache_entry *entry)
278 {
279 if(cache->free_list) {
280 entry->free_next = cache->free_list;
281 entry->free_prev = cache->free_list->free_prev;
282 cache->free_list->free_prev->free_next = entry;
283 cache->free_list->free_prev = entry;
284 } else {
285 cache->free_list = entry;
286 entry->free_prev = entry->free_next = entry;
287 }
288 }
289
290
291 /* Called with the cache mutex held */
292 void remove_free_list(struct cache *cache, struct cache_entry *entry)
293 {
294 if(entry->free_prev == NULL || entry->free_next == NULL)
295 /* not in free list */
296 return;
297 else if(entry->free_prev == entry && entry->free_next == entry) {
298 /* only this entry in the free list */
299 cache->free_list = NULL;
300 } else {
301 /* more than one entry in the free list */
302 entry->free_next->free_prev = entry->free_prev;
303 entry->free_prev->free_next = entry->free_next;
304 if(cache->free_list == entry)
305 cache->free_list = entry->free_next;
306 }
307
308 entry->free_prev = entry->free_next = NULL;
309 }
310
311
312 struct cache *cache_init(int buffer_size, int max_buffers)
313 {
314 struct cache *cache = malloc(sizeof(struct cache));
315
316 if(cache == NULL)
317 EXIT_UNSQUASH("Out of memory in cache_init\n");
318
319 cache->max_buffers = max_buffers;
320 cache->buffer_size = buffer_size;
321 cache->count = 0;
322 cache->used = 0;
323 cache->free_list = NULL;
324 memset(cache->hash_table, 0, sizeof(struct cache_entry *) * 65536);
325 cache->wait_free = FALSE;
326 cache->wait_pending = FALSE;
327 pthread_mutex_init(&cache->mutex, NULL);
328 pthread_cond_init(&cache->wait_for_free, NULL);
329 pthread_cond_init(&cache->wait_for_pending, NULL);
330
331 return cache;
332 }
333
334
335 struct cache_entry *cache_get(struct cache *cache, long long block, int size)
336 {
337 /*
338 * Get a block out of the cache. If the block isn't in the cache
339 * it is added and queued to the reader() and inflate() threads for
340	 * reading off disk and decompression. The cache grows until max_buffers
341	 * is reached; once this occurs, existing discarded blocks on the free
342	 * list are reused.
343 */
344 int hash = CALCULATE_HASH(block);
345 struct cache_entry *entry;
346
347 pthread_mutex_lock(&cache->mutex);
348
349 for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
350 if(entry->block == block)
351 break;
352
353 if(entry) {
354 /*
355 * found the block in the cache. If the block is currently unused
356 * remove it from the free list and increment cache used count.
357 */
358 if(entry->used == 0) {
359 cache->used ++;
360 remove_free_list(cache, entry);
361 }
362 entry->used ++;
363 pthread_mutex_unlock(&cache->mutex);
364 } else {
365 /*
366 * not in the cache
367 *
368 * first try to allocate new block
369 */
370 if(cache->count < cache->max_buffers) {
371 entry = malloc(sizeof(struct cache_entry));
372 if(entry == NULL)
373 EXIT_UNSQUASH("Out of memory in cache_get\n");
374 entry->data = malloc(cache->buffer_size);
375 if(entry->data == NULL)
376 EXIT_UNSQUASH("Out of memory in cache_get\n");
377 entry->cache = cache;
378 entry->free_prev = entry->free_next = NULL;
379 cache->count ++;
380 } else {
381 /*
382 * try to get from free list
383 */
384 while(cache->free_list == NULL) {
385 cache->wait_free = TRUE;
386 pthread_cond_wait(&cache->wait_for_free,
387 &cache->mutex);
388 }
389 entry = cache->free_list;
390 remove_free_list(cache, entry);
391 remove_hash_table(cache, entry);
392 }
393
394 /*
395 * Initialise block and insert into the hash table.
396 * Increment used which tracks how many buffers in the
397 * cache are actively in use (the other blocks, count - used,
398 * are in the cache and available for lookup, but can also be
399 * re-used).
400 */
401 entry->block = block;
402 entry->size = size;
403 entry->used = 1;
404 entry->error = FALSE;
405 entry->pending = TRUE;
406 insert_hash_table(cache, entry);
407 cache->used ++;
408
409 /*
410 * queue to read thread to read and ultimately (via the
411 * decompress threads) decompress the buffer
412 */
413 pthread_mutex_unlock(&cache->mutex);
414 queue_put(to_reader, entry);
415 }
416
417 return entry;
418 }
419
420
421 void cache_block_ready(struct cache_entry *entry, int error)
422 {
423 /*
424 * mark cache entry as being complete, reading and (if necessary)
425 * decompression has taken place, and the buffer is valid for use.
426 * If an error occurs reading or decompressing, the buffer also
427 * becomes ready but with an error...
428 */
429 pthread_mutex_lock(&entry->cache->mutex);
430 entry->pending = FALSE;
431 entry->error = error;
432
433 /*
434 * if the wait_pending flag is set, one or more threads may be waiting
435 * on this buffer
436 */
437 if(entry->cache->wait_pending) {
438 entry->cache->wait_pending = FALSE;
439 pthread_cond_broadcast(&entry->cache->wait_for_pending);
440 }
441
442 pthread_mutex_unlock(&entry->cache->mutex);
443 }
444
445
446 void cache_block_wait(struct cache_entry *entry)
447 {
448 /*
449 * wait for this cache entry to become ready, when reading and (if
450 * necessary) decompression has taken place
451 */
452 pthread_mutex_lock(&entry->cache->mutex);
453
454 while(entry->pending) {
455 entry->cache->wait_pending = TRUE;
456 pthread_cond_wait(&entry->cache->wait_for_pending,
457 &entry->cache->mutex);
458 }
459
460 pthread_mutex_unlock(&entry->cache->mutex);
461 }
462
463
464 void cache_block_put(struct cache_entry *entry)
465 {
466 /*
467 * finished with this cache entry, once the usage count reaches zero it
468 * can be reused and is put onto the free list. As it remains
469 * accessible via the hash table it can be found getting a new lease of
470 * life before it is reused.
471 */
472 pthread_mutex_lock(&entry->cache->mutex);
473
474 entry->used --;
475 if(entry->used == 0) {
476 insert_free_list(entry->cache, entry);
477 entry->cache->used --;
478
479 /*
480 * if the wait_free flag is set, one or more threads may be
481 * waiting on this buffer
482 */
483 if(entry->cache->wait_free) {
484 entry->cache->wait_free = FALSE;
485 pthread_cond_broadcast(&entry->cache->wait_for_free);
486 }
487 }
488
489 pthread_mutex_unlock(&entry->cache->mutex);
490 }
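
/*
 * Lifecycle sketch (illustrative, not from the original source): a
 * consumer of the cache typically does
 *
 *	struct cache_entry *entry = cache_get(data_cache, block, size);
 *	cache_block_wait(entry);	// sleep until read/decompress done
 *	if(!entry->error)
 *		... use entry->data ...
 *	cache_block_put(entry);		// drop reference
 *
 * cache_get() takes a reference (entry->used), cache_block_ready() is
 * called by the reader/inflator threads once the data is valid, and
 * cache_block_put() releases the reference so the buffer can go back on
 * the free list when unused.
 */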
491
492
493 void dump_cache(struct cache *cache)
494 {
495 pthread_mutex_lock(&cache->mutex);
496
497 printf("Max buffers %d, Current size %d, Used %d, %s\n",
498 cache->max_buffers, cache->count, cache->used,
499 cache->free_list ? "Free buffers" : "No free buffers");
500
501 pthread_mutex_unlock(&cache->mutex);
502 }
503
504
505 char *modestr(char *str, int mode)
506 {
507 int i;
508
509 strcpy(str, "----------");
510
511 for(i = 0; table[i].mask != 0; i++) {
512 if((mode & table[i].mask) == table[i].value)
513 str[table[i].position] = table[i].mode;
514 }
515
516 return str;
517 }
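
/*
 * Example (added for clarity): for a regular file with mode 0644 the
 * table above yields "-rw-r--r--", and for a setuid executable with mode
 * 04755 it yields "-rwsr-xr-x".
 */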
518
519
520 #define TOTALCHARS 25
521 int print_filename(char *pathname, struct inode *inode)
522 {
523 char str[11], dummy[12], dummy2[12]; /* overflow safe */
524 char *userstr, *groupstr;
525 int padchars;
526 struct passwd *user;
527 struct group *group;
528 struct tm *t;
529
530 if(short_ls) {
531 printf("%s\n", pathname);
532 return 1;
533 }
534
535 user = getpwuid(inode->uid);
536 if(user == NULL) {
537 int res = snprintf(dummy, 12, "%d", inode->uid);
538 if(res < 0)
539 EXIT_UNSQUASH("snprintf failed in print_filename()\n");
540 else if(res >= 12)
541 /* unsigned int shouldn't ever need more than 11 bytes
542 * (including terminating '\0') to print in base 10 */
543 userstr = "*";
544 else
545 userstr = dummy;
546 } else
547 userstr = user->pw_name;
548
549 group = getgrgid(inode->gid);
550 if(group == NULL) {
551 int res = snprintf(dummy2, 12, "%d", inode->gid);
552 if(res < 0)
553 EXIT_UNSQUASH("snprintf failed in print_filename()\n");
554 else if(res >= 12)
555 /* unsigned int shouldn't ever need more than 11 bytes
556 * (including terminating '\0') to print in base 10 */
557 groupstr = "*";
558 else
559 groupstr = dummy2;
560 } else
561 groupstr = group->gr_name;
562
563 printf("%s %s/%s ", modestr(str, inode->mode), userstr, groupstr);
564
565 switch(inode->mode & S_IFMT) {
566 case S_IFREG:
567 case S_IFDIR:
568 case S_IFSOCK:
569 case S_IFIFO:
570 case S_IFLNK:
571 padchars = TOTALCHARS - strlen(userstr) -
572 strlen(groupstr);
573
574 printf("%*lld ", padchars > 0 ? padchars : 0,
575 inode->data);
576 break;
577 case S_IFCHR:
578 case S_IFBLK:
579 padchars = TOTALCHARS - strlen(userstr) -
580 strlen(groupstr) - 7;
581
582 printf("%*s%3d,%3d ", padchars > 0 ? padchars : 0, " ",
583 (int) inode->data >> 8, (int) inode->data &
584 0xff);
585 break;
586 }
587
588 t = localtime(&inode->time);
589
590 printf("%d-%02d-%02d %02d:%02d %s", t->tm_year + 1900, t->tm_mon + 1,
591 t->tm_mday, t->tm_hour, t->tm_min, pathname);
592 if((inode->mode & S_IFMT) == S_IFLNK)
593 printf(" -> %s", inode->symlink);
594 printf("\n");
595
596 return 1;
597 }
598
599
600 void add_entry(struct hash_table_entry *hash_table[], long long start,
601 int bytes)
602 {
603 int hash = CALCULATE_HASH(start);
604 struct hash_table_entry *hash_table_entry;
605
606 hash_table_entry = malloc(sizeof(struct hash_table_entry));
607 if(hash_table_entry == NULL)
608 EXIT_UNSQUASH("Out of memory in add_entry\n");
609
610 hash_table_entry->start = start;
611 hash_table_entry->bytes = bytes;
612 hash_table_entry->next = hash_table[hash];
613 hash_table[hash] = hash_table_entry;
614 }
615
616
617 int lookup_entry(struct hash_table_entry *hash_table[], long long start)
618 {
619 int hash = CALCULATE_HASH(start);
620 struct hash_table_entry *hash_table_entry;
621
622 for(hash_table_entry = hash_table[hash]; hash_table_entry;
623 hash_table_entry = hash_table_entry->next)
624
625 if(hash_table_entry->start == start)
626 return hash_table_entry->bytes;
627
628 return -1;
629 }
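
/*
 * Note (added for clarity): these hash tables map the on-disk start
 * offset of each compressed metadata block to the byte offset of its
 * uncompressed contents within inode_table/directory_table.  After
 * read_inode_table() an inode reference <start, offset> is resolved
 * roughly as
 *
 *	inode_table + lookup_entry(inode_table_hash, start) + offset
 */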
630
631
632 int read_fs_bytes(int fd, long long byte, int bytes, void *buff)
633 {
634 off_t off = byte;
635 int res, count;
636
637 TRACE("read_bytes: reading from position 0x%llx, bytes %d\n", byte,
638 bytes);
639
640 if(lseek(fd, off, SEEK_SET) == -1) {
641 ERROR("Lseek failed because %s\n", strerror(errno));
642 return FALSE;
643 }
644
645 for(count = 0; count < bytes; count += res) {
646 res = read(fd, buff + count, bytes - count);
647 if(res < 1) {
648 if(res == 0) {
649 ERROR("Read on filesystem failed because "
650 "EOF\n");
651 return FALSE;
652 } else if(errno != EINTR) {
653 ERROR("Read on filesystem failed because %s\n",
654 strerror(errno));
655 return FALSE;
656 } else
657 res = 0;
658 }
659 }
660
661 return TRUE;
662 }
663
664
665 int read_block(int fd, long long start, long long *next, int expected,
666 void *block)
667 {
668 unsigned short c_byte;
669 int offset = 2, res, compressed;
670 int outlen = expected ? expected : SQUASHFS_METADATA_SIZE;
671
672 if(swap) {
673 if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
674 goto failed;
675 c_byte = (c_byte >> 8) | ((c_byte & 0xff) << 8);
676 } else
677 if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
678 goto failed;
679
680 TRACE("read_block: block @0x%llx, %d %s bytes\n", start,
681 SQUASHFS_COMPRESSED_SIZE(c_byte), SQUASHFS_COMPRESSED(c_byte) ?
682 "compressed" : "uncompressed");
683
684 if(SQUASHFS_CHECK_DATA(sBlk.s.flags))
685 offset = 3;
686
687 compressed = SQUASHFS_COMPRESSED(c_byte);
688 c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
689
690 /*
691 * The block size should not be larger than
692 * the uncompressed size (or max uncompressed size if
693 * expected is 0)
694 */
695 if(c_byte > outlen)
696 return 0;
697
698 if(compressed) {
699 char buffer[c_byte];
700 int error;
701
702 res = read_fs_bytes(fd, start + offset, c_byte, buffer);
703 if(res == FALSE)
704 goto failed;
705
706 res = compressor_uncompress(comp, block, buffer, c_byte,
707 outlen, &error);
708
709 if(res == -1) {
710 ERROR("%s uncompress failed with error code %d\n",
711 comp->name, error);
712 goto failed;
713 }
714 } else {
715 res = read_fs_bytes(fd, start + offset, c_byte, block);
716 if(res == FALSE)
717 goto failed;
718 res = c_byte;
719 }
720
721 if(next)
722 *next = start + offset + c_byte;
723
724 /*
725 * if expected, then check the (uncompressed) return data
726 * is of the expected size
727 */
728 if(expected && expected != res)
729 return 0;
730 else
731 return res;
732
733 failed:
734 ERROR("read_block: failed to read block @0x%llx\n", start);
735 return FALSE;
736 }
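
/*
 * Background note (a summary of the squashfs_fs.h macros, added here for
 * reference): each metadata block is preceded on disk by a two byte
 * length field; SQUASHFS_COMPRESSED_BIT (the top bit) is set when the
 * block is stored uncompressed, and the remaining bits give the on-disk
 * size, e.g.
 *
 *	c_byte = 0x01a4  ->  compressed block, 420 bytes on disk
 *	c_byte = 0x8200  ->  uncompressed block, 512 bytes on disk
 */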
737
738
739 int read_data_block(long long start, unsigned int size, char *block)
740 {
741 int error, res;
742 int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
743
744 TRACE("read_data_block: block @0x%llx, %d %s bytes\n", start,
745 c_byte, SQUASHFS_COMPRESSED_BLOCK(size) ? "compressed" :
746 "uncompressed");
747
748 if(SQUASHFS_COMPRESSED_BLOCK(size)) {
749 if(read_fs_bytes(fd, start, c_byte, data) == FALSE)
750 goto failed;
751
752 res = compressor_uncompress(comp, block, data, c_byte,
753 block_size, &error);
754
755 if(res == -1) {
756 ERROR("%s uncompress failed with error code %d\n",
757 comp->name, error);
758 goto failed;
759 }
760
761 return res;
762 } else {
763 if(read_fs_bytes(fd, start, c_byte, block) == FALSE)
764 goto failed;
765
766 return c_byte;
767 }
768
769 failed:
770 ERROR("read_data_block: failed to read block @0x%llx, size %d\n", start,
771 c_byte);
772 return FALSE;
773 }
774
775
776 int read_inode_table(long long start, long long end)
777 {
778 int size = 0, bytes = 0, res;
779
780 TRACE("read_inode_table: start %lld, end %lld\n", start, end);
781
782 while(start < end) {
783 if(size - bytes < SQUASHFS_METADATA_SIZE) {
784 inode_table = realloc(inode_table, size +=
785 SQUASHFS_METADATA_SIZE);
786 if(inode_table == NULL) {
787 ERROR("Out of memory in read_inode_table");
788 goto failed;
789 }
790 }
791
792 add_entry(inode_table_hash, start, bytes);
793
794 res = read_block(fd, start, &start, 0, inode_table + bytes);
795 if(res == 0) {
796 ERROR("read_inode_table: failed to read block\n");
797 goto failed;
798 }
799 bytes += res;
800
801 /*
802 * If this is not the last metadata block in the inode table
803 * then it should be SQUASHFS_METADATA_SIZE in size.
804 * Note, we can't use expected in read_block() above for this
805 * because we don't know if this is the last block until
806 * after reading.
807 */
808 if(start != end && res != SQUASHFS_METADATA_SIZE) {
809 ERROR("read_inode_table: metadata block should be %d "
810 "bytes in length, it is %d bytes\n",
811 SQUASHFS_METADATA_SIZE, res);
812
813 goto failed;
814 }
815 }
816
817 return TRUE;
818
819 failed:
820 free(inode_table);
821 return FALSE;
822 }
823
824
825 int set_attributes(char *pathname, int mode, uid_t uid, gid_t guid, time_t time,
826 unsigned int xattr, unsigned int set_mode)
827 {
828 struct utimbuf times = { time, time };
829
830	if(utime(pathname, &times) == -1) {
831 ERROR("set_attributes: failed to set time on %s, because %s\n",
832 pathname, strerror(errno));
833 return FALSE;
834 }
835
836 if(root_process) {
837 if(chown(pathname, uid, guid) == -1) {
838 ERROR("set_attributes: failed to change uid and gids "
839 "on %s, because %s\n", pathname,
840 strerror(errno));
841 return FALSE;
842 }
843 } else
844 mode &= ~07000;
845
846 if((set_mode || (mode & 07000)) && chmod(pathname, (mode_t) mode) == -1) {
847 ERROR("set_attributes: failed to change mode %s, because %s\n",
848 pathname, strerror(errno));
849 return FALSE;
850 }
851
852 write_xattr(pathname, xattr);
853
854 return TRUE;
855 }
856
857
858 int write_bytes(int fd, char *buff, int bytes)
859 {
860 int res, count;
861
862 for(count = 0; count < bytes; count += res) {
863 res = write(fd, buff + count, bytes - count);
864 if(res == -1) {
865 if(errno != EINTR) {
866 ERROR("Write on output file failed because "
867 "%s\n", strerror(errno));
868 return -1;
869 }
870 res = 0;
871 }
872 }
873
874 return 0;
875 }
876
877
878 int lseek_broken = FALSE;
879 char *zero_data = NULL;
880
881 int write_block(int file_fd, char *buffer, int size, long long hole, int sparse)
882 {
883 off_t off = hole;
884
885 if(hole) {
886 if(sparse && lseek_broken == FALSE) {
887 int error = lseek(file_fd, off, SEEK_CUR);
888 if(error == -1)
889 /* failed to seek beyond end of file */
890 lseek_broken = TRUE;
891 }
892
893 if((sparse == FALSE || lseek_broken) && zero_data == NULL) {
894 if((zero_data = malloc(block_size)) == NULL)
895 EXIT_UNSQUASH("write_block: failed to alloc "
896 "zero data block\n");
897 memset(zero_data, 0, block_size);
898 }
899
900 if(sparse == FALSE || lseek_broken) {
901 int blocks = (hole + block_size -1) / block_size;
902 int avail_bytes, i;
903 for(i = 0; i < blocks; i++, hole -= avail_bytes) {
904 avail_bytes = hole > block_size ? block_size :
905 hole;
906 if(write_bytes(file_fd, zero_data, avail_bytes)
907 == -1)
908 goto failure;
909 }
910 }
911 }
912
913 if(write_bytes(file_fd, buffer, size) == -1)
914 goto failure;
915
916 return TRUE;
917
918 failure:
919 return FALSE;
920 }
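
/*
 * Note (added for clarity): "hole" is the number of sparse bytes that
 * precede this block in the output file.  Where the file is being written
 * sparsely and lseek() can seek past EOF, the hole is simply skipped;
 * otherwise it is filled with explicit zero blocks from zero_data.  For
 * example
 *
 *	write_block(file_fd, buf, size, 3 * block_size, TRUE);
 *
 * skips (or zero-fills) 3 * block_size bytes before writing buf.
 */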
921
922
923 pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
924 pthread_cond_t open_empty = PTHREAD_COND_INITIALIZER;
925 int open_unlimited, open_count;
926 #define OPEN_FILE_MARGIN 10
927
928
929 void open_init(int count)
930 {
931 open_count = count;
932 open_unlimited = count == -1;
933 }
934
935
936 int open_wait(char *pathname, int flags, mode_t mode)
937 {
938 if (!open_unlimited) {
939 pthread_mutex_lock(&open_mutex);
940 while (open_count == 0)
941 pthread_cond_wait(&open_empty, &open_mutex);
942 open_count --;
943 pthread_mutex_unlock(&open_mutex);
944 }
945
946 return open(pathname, flags, mode);
947 }
948
949
950 void close_wake(int fd)
951 {
952 close(fd);
953
954 if (!open_unlimited) {
955 pthread_mutex_lock(&open_mutex);
956 open_count ++;
957 pthread_cond_signal(&open_empty);
958 pthread_mutex_unlock(&open_mutex);
959 }
960 }
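
/*
 * Note (added for clarity): open_wait()/close_wake() act as a counting
 * semaphore around open(), so the writer thread never holds more file
 * descriptors than the count passed to open_init() (typically derived
 * from the process file descriptor limit, with OPEN_FILE_MARGIN held
 * back).  open_init(-1) sets open_unlimited and bypasses the throttling.
 */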
961
962
963 void queue_file(char *pathname, int file_fd, struct inode *inode)
964 {
965 struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
966 if(file == NULL)
967 EXIT_UNSQUASH("queue_file: unable to malloc file\n");
968
969 file->fd = file_fd;
970 file->file_size = inode->data;
971 file->mode = inode->mode;
972 file->gid = inode->gid;
973 file->uid = inode->uid;
974 file->time = inode->time;
975 file->pathname = strdup(pathname);
976 file->blocks = inode->blocks + (inode->frag_bytes > 0);
977 file->sparse = inode->sparse;
978 file->xattr = inode->xattr;
979 queue_put(to_writer, file);
980 }
981
982
983 void queue_dir(char *pathname, struct dir *dir)
984 {
985 struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
986 if(file == NULL)
987 EXIT_UNSQUASH("queue_dir: unable to malloc file\n");
988
989 file->fd = -1;
990 file->mode = dir->mode;
991 file->gid = dir->guid;
992 file->uid = dir->uid;
993 file->time = dir->mtime;
994 file->pathname = strdup(pathname);
995 file->xattr = dir->xattr;
996 queue_put(to_writer, file);
997 }
998
999
1000 int write_file(struct inode *inode, char *pathname)
1001 {
1002 unsigned int file_fd, i;
1003 unsigned int *block_list;
1004 int file_end = inode->data / block_size;
1005 long long start = inode->start;
1006
1007 TRACE("write_file: regular file, blocks %d\n", inode->blocks);
1008
1009 file_fd = open_wait(pathname, O_CREAT | O_WRONLY |
1010 (force ? O_TRUNC : 0), (mode_t) inode->mode & 0777);
1011 if(file_fd == -1) {
1012 ERROR("write_file: failed to create file %s, because %s\n",
1013 pathname, strerror(errno));
1014 return FALSE;
1015 }
1016
1017 block_list = malloc(inode->blocks * sizeof(unsigned int));
1018 if(block_list == NULL)
1019 EXIT_UNSQUASH("write_file: unable to malloc block list\n");
1020
1021 s_ops.read_block_list(block_list, inode->block_ptr, inode->blocks);
1022
1023 /*
1024 * the writer thread is queued a squashfs_file structure describing the
1025 * file. If the file has one or more blocks or a fragment they are
1026 * queued separately (references to blocks in the cache).
1027 */
1028 queue_file(pathname, file_fd, inode);
1029
1030 for(i = 0; i < inode->blocks; i++) {
1031 int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
1032 struct file_entry *block = malloc(sizeof(struct file_entry));
1033
1034 if(block == NULL)
1035 EXIT_UNSQUASH("write_file: unable to malloc file\n");
1036 block->offset = 0;
1037 block->size = i == file_end ? inode->data & (block_size - 1) :
1038 block_size;
1039 if(block_list[i] == 0) /* sparse block */
1040 block->buffer = NULL;
1041 else {
1042 block->buffer = cache_get(data_cache, start,
1043 block_list[i]);
1044 start += c_byte;
1045 }
1046 queue_put(to_writer, block);
1047 }
1048
1049 if(inode->frag_bytes) {
1050 int size;
1051 long long start;
1052 struct file_entry *block = malloc(sizeof(struct file_entry));
1053
1054 if(block == NULL)
1055 EXIT_UNSQUASH("write_file: unable to malloc file\n");
1056 s_ops.read_fragment(inode->fragment, &start, &size);
1057 block->buffer = cache_get(fragment_cache, start, size);
1058 block->offset = inode->offset;
1059 block->size = inode->frag_bytes;
1060 queue_put(to_writer, block);
1061 }
1062
1063 free(block_list);
1064 return TRUE;
1065 }
1066
1067
1068 int create_inode(char *pathname, struct inode *i)
1069 {
1070 TRACE("create_inode: pathname %s\n", pathname);
1071
1072 if(created_inode[i->inode_number - 1]) {
1073 TRACE("create_inode: hard link\n");
1074 if(force)
1075 unlink(pathname);
1076
1077 if(link(created_inode[i->inode_number - 1], pathname) == -1) {
1078 ERROR("create_inode: failed to create hardlink, "
1079 "because %s\n", strerror(errno));
1080 return FALSE;
1081 }
1082
1083 return TRUE;
1084 }
1085
1086 switch(i->type) {
1087 case SQUASHFS_FILE_TYPE:
1088 case SQUASHFS_LREG_TYPE:
1089 TRACE("create_inode: regular file, file_size %lld, "
1090 "blocks %d\n", i->data, i->blocks);
1091
1092 if(write_file(i, pathname))
1093 file_count ++;
1094 break;
1095 case SQUASHFS_SYMLINK_TYPE:
1096 case SQUASHFS_LSYMLINK_TYPE:
1097 TRACE("create_inode: symlink, symlink_size %lld\n",
1098 i->data);
1099
1100 if(force)
1101 unlink(pathname);
1102
1103 if(symlink(i->symlink, pathname) == -1) {
1104 ERROR("create_inode: failed to create symlink "
1105 "%s, because %s\n", pathname,
1106 strerror(errno));
1107 break;
1108 }
1109
1110 write_xattr(pathname, i->xattr);
1111
1112 if(root_process) {
1113 if(lchown(pathname, i->uid, i->gid) == -1)
1114 ERROR("create_inode: failed to change "
1115 "uid and gids on %s, because "
1116 "%s\n", pathname,
1117 strerror(errno));
1118 }
1119
1120 sym_count ++;
1121 break;
1122 case SQUASHFS_BLKDEV_TYPE:
1123 case SQUASHFS_CHRDEV_TYPE:
1124 case SQUASHFS_LBLKDEV_TYPE:
1125 case SQUASHFS_LCHRDEV_TYPE: {
1126 int chrdev = i->type == SQUASHFS_CHRDEV_TYPE;
1127 TRACE("create_inode: dev, rdev 0x%llx\n", i->data);
1128
1129 if(root_process) {
1130 if(force)
1131 unlink(pathname);
1132
1133 if(mknod(pathname, chrdev ? S_IFCHR : S_IFBLK,
1134 makedev((i->data >> 8) & 0xff,
1135 i->data & 0xff)) == -1) {
1136 ERROR("create_inode: failed to create "
1137 "%s device %s, because %s\n",
1138 chrdev ? "character" : "block",
1139 pathname, strerror(errno));
1140 break;
1141 }
1142 set_attributes(pathname, i->mode, i->uid,
1143 i->gid, i->time, i->xattr, TRUE);
1144 dev_count ++;
1145 } else
1146 ERROR("create_inode: could not create %s "
1147 "device %s, because you're not "
1148 "superuser!\n", chrdev ? "character" :
1149 "block", pathname);
1150 break;
1151 }
1152 case SQUASHFS_FIFO_TYPE:
1153 case SQUASHFS_LFIFO_TYPE:
1154 TRACE("create_inode: fifo\n");
1155
1156 if(force)
1157 unlink(pathname);
1158
1159 if(mknod(pathname, S_IFIFO, 0) == -1) {
1160 ERROR("create_inode: failed to create fifo %s, "
1161 "because %s\n", pathname,
1162 strerror(errno));
1163 break;
1164 }
1165 set_attributes(pathname, i->mode, i->uid, i->gid,
1166 i->time, i->xattr, TRUE);
1167 fifo_count ++;
1168 break;
1169 case SQUASHFS_SOCKET_TYPE:
1170 case SQUASHFS_LSOCKET_TYPE:
1171 TRACE("create_inode: socket\n");
1172 ERROR("create_inode: socket %s ignored\n", pathname);
1173 break;
1174 default:
1175 ERROR("Unknown inode type %d in create_inode_table!\n",
1176 i->type);
1177 return FALSE;
1178 }
1179
1180 created_inode[i->inode_number - 1] = strdup(pathname);
1181
1182 return TRUE;
1183 }
1184
1185
1186 int read_directory_table(long long start, long long end)
1187 {
1188 int bytes = 0, size = 0, res;
1189
1190 TRACE("read_directory_table: start %lld, end %lld\n", start, end);
1191
1192 while(start < end) {
1193 if(size - bytes < SQUASHFS_METADATA_SIZE) {
1194 directory_table = realloc(directory_table, size +=
1195 SQUASHFS_METADATA_SIZE);
1196 if(directory_table == NULL) {
1197 ERROR("Out of memory in "
1198 "read_directory_table\n");
1199 goto failed;
1200 }
1201 }
1202
1203 add_entry(directory_table_hash, start, bytes);
1204
1205 res = read_block(fd, start, &start, 0, directory_table + bytes);
1206 if(res == 0) {
1207 ERROR("read_directory_table: failed to read block\n");
1208 goto failed;
1209 }
1210
1211 bytes += res;
1212
1213 /*
1214 * If this is not the last metadata block in the directory table
1215 * then it should be SQUASHFS_METADATA_SIZE in size.
1216 * Note, we can't use expected in read_block() above for this
1217 * because we don't know if this is the last block until
1218 * after reading.
1219 */
1220 if(start != end && res != SQUASHFS_METADATA_SIZE) {
1221 ERROR("read_directory_table: metadata block "
1222 "should be %d bytes in length, it is %d "
1223 "bytes\n", SQUASHFS_METADATA_SIZE, res);
1224 goto failed;
1225 }
1226 }
1227
1228 return TRUE;
1229
1230 failed:
1231 free(directory_table);
1232 return FALSE;
1233 }
1234
1235
1236 int squashfs_readdir(struct dir *dir, char **name, unsigned int *start_block,
1237 unsigned int *offset, unsigned int *type)
1238 {
1239 if(dir->cur_entry == dir->dir_count)
1240 return FALSE;
1241
1242 *name = dir->dirs[dir->cur_entry].name;
1243 *start_block = dir->dirs[dir->cur_entry].start_block;
1244 *offset = dir->dirs[dir->cur_entry].offset;
1245 *type = dir->dirs[dir->cur_entry].type;
1246 dir->cur_entry ++;
1247
1248 return TRUE;
1249 }
1250
1251
1252 void squashfs_closedir(struct dir *dir)
1253 {
1254 free(dir->dirs);
1255 free(dir);
1256 }
1257
1258
1259 char *get_component(char *target, char **targname)
1260 {
1261 char *start;
1262
1263 while(*target == '/')
1264 target ++;
1265
1266 start = target;
1267 while(*target != '/' && *target != '\0')
1268 target ++;
1269
1270 *targname = strndup(start, target - start);
1271
1272 while(*target == '/')
1273 target ++;
1274
1275 return target;
1276 }
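
/*
 * Example (added for clarity): get_component("/usr//bin/ls", &name) sets
 * name to "usr" (strndup'ed, freed by the caller) and returns a pointer
 * to "bin/ls", so repeated calls walk a pathname one component at a time.
 */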
1277
1278
1279 void free_path(struct pathname *paths)
1280 {
1281 int i;
1282
1283 for(i = 0; i < paths->names; i++) {
1284 if(paths->name[i].paths)
1285 free_path(paths->name[i].paths);
1286 free(paths->name[i].name);
1287 if(paths->name[i].preg) {
1288 regfree(paths->name[i].preg);
1289 free(paths->name[i].preg);
1290 }
1291 }
1292
1293 free(paths);
1294 }
1295
1296
1297 struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
1298 {
1299 char *targname;
1300 int i, error;
1301
1302 TRACE("add_path: adding \"%s\" extract file\n", target);
1303
1304 target = get_component(target, &targname);
1305
1306 if(paths == NULL) {
1307 paths = malloc(sizeof(struct pathname));
1308 if(paths == NULL)
1309 EXIT_UNSQUASH("failed to allocate paths\n");
1310
1311 paths->names = 0;
1312 paths->name = NULL;
1313 }
1314
1315 for(i = 0; i < paths->names; i++)
1316 if(strcmp(paths->name[i].name, targname) == 0)
1317 break;
1318
1319 if(i == paths->names) {
1320 /*
1321 * allocate new name entry
1322 */
1323 paths->names ++;
1324 paths->name = realloc(paths->name, (i + 1) *
1325 sizeof(struct path_entry));
1326 if(paths->name == NULL)
1327 EXIT_UNSQUASH("Out of memory in add_path\n");
1328 paths->name[i].name = targname;
1329 paths->name[i].paths = NULL;
1330 if(use_regex) {
1331 paths->name[i].preg = malloc(sizeof(regex_t));
1332 if(paths->name[i].preg == NULL)
1333 EXIT_UNSQUASH("Out of memory in add_path\n");
1334 error = regcomp(paths->name[i].preg, targname,
1335 REG_EXTENDED|REG_NOSUB);
1336 if(error) {
1337 char str[1024]; /* overflow safe */
1338
1339 regerror(error, paths->name[i].preg, str, 1024);
1340 EXIT_UNSQUASH("invalid regex %s in export %s, "
1341 "because %s\n", targname, alltarget,
1342 str);
1343 }
1344 } else
1345 paths->name[i].preg = NULL;
1346
1347 if(target[0] == '\0')
1348 /*
1349 * at leaf pathname component
1350 */
1351 paths->name[i].paths = NULL;
1352 else
1353 /*
1354 * recurse adding child components
1355 */
1356 paths->name[i].paths = add_path(NULL, target, alltarget);
1357 } else {
1358 /*
1359 * existing matching entry
1360 */
1361 free(targname);
1362
1363 if(paths->name[i].paths == NULL) {
1364 /*
1365 * No sub-directory which means this is the leaf
1366 * component of a pre-existing extract which subsumes
1367 * the extract currently being added, in which case stop
1368 * adding components
1369 */
1370 } else if(target[0] == '\0') {
1371 /*
1372 * at leaf pathname component and child components exist
1373 * from more specific extracts, delete as they're
1374 * subsumed by this extract
1375 */
1376 free_path(paths->name[i].paths);
1377 paths->name[i].paths = NULL;
1378 } else
1379 /*
1380 * recurse adding child components
1381 */
1382 add_path(paths->name[i].paths, target, alltarget);
1383 }
1384
1385 return paths;
1386 }
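
/*
 * Illustrative example (not from the original source): extract targets
 * are stored as a trie of pathname components, so adding "a/x" and "a/y"
 * produces
 *
 *	"a" -> { "x", "y" }
 *
 * Adding the broader target "a" afterwards frees the "x"/"y" children,
 * because the new extract subsumes them (see the leaf handling above).
 */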
1387
1388
1389 struct pathnames *init_subdir()
1390 {
1391 struct pathnames *new = malloc(sizeof(struct pathnames));
1392 if(new == NULL)
1393 EXIT_UNSQUASH("Out of memory in init_subdir\n");
1394 new->count = 0;
1395 return new;
1396 }
1397
1398
1399 struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
1400 {
1401 if(paths->count % PATHS_ALLOC_SIZE == 0) {
1402 paths = realloc(paths, sizeof(struct pathnames *) +
1403 (paths->count + PATHS_ALLOC_SIZE) *
1404 sizeof(struct pathname *));
1405 if(paths == NULL)
1406 EXIT_UNSQUASH("Out of memory in add_subdir\n");
1407 }
1408
1409 paths->path[paths->count++] = path;
1410 return paths;
1411 }
1412
1413
1414 void free_subdir(struct pathnames *paths)
1415 {
1416 free(paths);
1417 }
1418
1419
1420 int matches(struct pathnames *paths, char *name, struct pathnames **new)
1421 {
1422 int i, n;
1423
1424 if(paths == NULL) {
1425 *new = NULL;
1426 return TRUE;
1427 }
1428
1429 *new = init_subdir();
1430
1431 for(n = 0; n < paths->count; n++) {
1432 struct pathname *path = paths->path[n];
1433 for(i = 0; i < path->names; i++) {
1434 int match = use_regex ?
1435 regexec(path->name[i].preg, name, (size_t) 0,
1436 NULL, 0) == 0 : fnmatch(path->name[i].name,
1437 name, FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) ==
1438 0;
1439 if(match && path->name[i].paths == NULL)
1440 /*
1441 * match on a leaf component, any subdirectories
1442 * will implicitly match, therefore return an
1443 * empty new search set
1444 */
1445 goto empty_set;
1446
1447 if(match)
1448 /*
1449 * match on a non-leaf component, add any
1450 * subdirectories to the new set of
1451 * subdirectories to scan for this name
1452 */
1453 *new = add_subdir(*new, path->name[i].paths);
1454 }
1455 }
1456
1457 if((*new)->count == 0) {
1458 /*
1459 * no matching names found, delete empty search set, and return
1460 * FALSE
1461 */
1462 free_subdir(*new);
1463 *new = NULL;
1464 return FALSE;
1465 }
1466
1467 /*
1468 * one or more matches with sub-directories found (no leaf matches),
1469 * return new search set and return TRUE
1470 */
1471 return TRUE;
1472
1473 empty_set:
1474 /*
1475 * found matching leaf exclude, return empty search set and return TRUE
1476 */
1477 free_subdir(*new);
1478 *new = NULL;
1479 return TRUE;
1480 }
1481
1482
1483 void pre_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1484 struct pathnames *paths)
1485 {
1486 unsigned int type;
1487 char *name;
1488 struct pathnames *new;
1489 struct inode *i;
1490 struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1491
1492 if(dir == NULL)
1493 return;
1494
1495 while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1496 struct inode *i;
1497 char *pathname;
1498 int res;
1499
1500 TRACE("pre_scan: name %s, start_block %d, offset %d, type %d\n",
1501 name, start_block, offset, type);
1502
1503 if(!matches(paths, name, &new))
1504 continue;
1505
1506 res = asprintf(&pathname, "%s/%s", parent_name, name);
1507 if(res == -1)
1508 EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1509
1510 if(type == SQUASHFS_DIR_TYPE)
1511 pre_scan(parent_name, start_block, offset, new);
1512 else if(new == NULL) {
1513 if(type == SQUASHFS_FILE_TYPE ||
1514 type == SQUASHFS_LREG_TYPE) {
1515 i = s_ops.read_inode(start_block, offset);
1516 if(created_inode[i->inode_number - 1] == NULL) {
1517 created_inode[i->inode_number - 1] =
1518 (char *) i;
1519 total_blocks += (i->data +
1520 (block_size - 1)) >> block_log;
1521 }
1522 total_files ++;
1523 }
1524 total_inodes ++;
1525 }
1526
1527 free_subdir(new);
1528 free(pathname);
1529 }
1530
1531 squashfs_closedir(dir);
1532 }
1533
1534
1535 void dir_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1536 struct pathnames *paths)
1537 {
1538 unsigned int type;
1539 char *name;
1540 struct pathnames *new;
1541 struct inode *i;
1542 struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1543
1544 if(dir == NULL) {
1545 ERROR("dir_scan: failed to read directory %s, skipping\n",
1546 parent_name);
1547 return;
1548 }
1549
1550 if(lsonly || info)
1551 print_filename(parent_name, i);
1552
1553 if(!lsonly) {
1554 /*
1555 * Make directory with default User rwx permissions rather than
1556 * the permissions from the filesystem, as these may not have
1557 * write/execute permission. These are fixed up later in
1558 * set_attributes().
1559 */
1560 int res = mkdir(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1561 if(res == -1) {
1562 /*
1563 * Skip directory if mkdir fails, unless we're
1564 * forcing and the error is -EEXIST
1565 */
1566 if(!force || errno != EEXIST) {
1567 ERROR("dir_scan: failed to make directory %s, "
1568 "because %s\n", parent_name,
1569 strerror(errno));
1570 squashfs_closedir(dir);
1571 return;
1572 }
1573
1574 /*
1575 * Try to change permissions of existing directory so
1576 * that we can write to it
1577 */
1578 res = chmod(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1579 if (res == -1)
1580 ERROR("dir_scan: failed to change permissions "
1581 "for directory %s, because %s\n",
1582 parent_name, strerror(errno));
1583 }
1584 }
1585
1586 while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1587 char *pathname;
1588 int res;
1589
1590 TRACE("dir_scan: name %s, start_block %d, offset %d, type %d\n",
1591 name, start_block, offset, type);
1592
1593
1594 if(!matches(paths, name, &new))
1595 continue;
1596
1597 res = asprintf(&pathname, "%s/%s", parent_name, name);
1598 if(res == -1)
1599 EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1600
1601 if(type == SQUASHFS_DIR_TYPE) {
1602 dir_scan(pathname, start_block, offset, new);
1603 free(pathname);
1604 } else if(new == NULL) {
1605 update_info(pathname);
1606
1607 i = s_ops.read_inode(start_block, offset);
1608
1609 if(lsonly || info)
1610 print_filename(pathname, i);
1611
1612 if(!lsonly)
1613 create_inode(pathname, i);
1614
1615 if(i->type == SQUASHFS_SYMLINK_TYPE ||
1616 i->type == SQUASHFS_LSYMLINK_TYPE)
1617 free(i->symlink);
1618 } else
1619 free(pathname);
1620
1621 free_subdir(new);
1622 }
1623
1624 if(!lsonly)
1625 queue_dir(parent_name, dir);
1626
1627 squashfs_closedir(dir);
1628 dir_count ++;
1629 }
1630
1631
1632 void squashfs_stat(char *source)
1633 {
1634 time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
1635 char *mkfs_str = ctime(&mkfs_time);
1636
1637 #if __BYTE_ORDER == __BIG_ENDIAN
1638 printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1639 sBlk.s.s_major == 4 ? "" : swap ? "little endian " :
1640 "big endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1641 #else
1642 printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1643 sBlk.s.s_major == 4 ? "" : swap ? "big endian " :
1644 "little endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1645 #endif
1646
1647 printf("Creation or last append time %s", mkfs_str ? mkfs_str :
1648 "failed to get time\n");
1649 printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n",
1650 sBlk.s.bytes_used / 1024.0, sBlk.s.bytes_used /
1651 (1024.0 * 1024.0));
1652
1653 if(sBlk.s.s_major == 4) {
1654 printf("Compression %s\n", comp->name);
1655
1656 if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1657 char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1658 int bytes;
1659
1660 bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1661 if(bytes == 0) {
1662 ERROR("Failed to read compressor options\n");
1663 return;
1664 }
1665
1666 compressor_display_options(comp, buffer, bytes);
1667 }
1668 }
1669
1670 printf("Block size %d\n", sBlk.s.block_size);
1671 printf("Filesystem is %sexportable via NFS\n",
1672 SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
1673 printf("Inodes are %scompressed\n",
1674 SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
1675 printf("Data is %scompressed\n",
1676 SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
1677
1678 if(sBlk.s.s_major > 1) {
1679 if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
1680 printf("Fragments are not stored\n");
1681 else {
1682 printf("Fragments are %scompressed\n",
1683 SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ?
1684 "un" : "");
1685 printf("Always-use-fragments option is %sspecified\n",
1686 SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" :
1687 "not ");
1688 }
1689 }
1690
1691 if(sBlk.s.s_major == 4) {
1692 if(SQUASHFS_NO_XATTRS(sBlk.s.flags))
1693 printf("Xattrs are not stored\n");
1694 else
1695 printf("Xattrs are %scompressed\n",
1696 SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.s.flags) ?
1697 "un" : "");
1698 }
1699
1700 if(sBlk.s.s_major < 4)
1701 printf("Check data is %spresent in the filesystem\n",
1702 SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" :
1703 "not ");
1704
1705 if(sBlk.s.s_major > 1)
1706 printf("Duplicates are %sremoved\n",
1707 SQUASHFS_DUPLICATES(sBlk.s.flags) ? "" : "not ");
1708 else
1709 printf("Duplicates are removed\n");
1710
1711 if(sBlk.s.s_major > 1)
1712 printf("Number of fragments %d\n", sBlk.s.fragments);
1713
1714 printf("Number of inodes %d\n", sBlk.s.inodes);
1715
1716 if(sBlk.s.s_major == 4)
1717 printf("Number of ids %d\n", sBlk.s.no_ids);
1718 else {
1719 printf("Number of uids %d\n", sBlk.no_uids);
1720 printf("Number of gids %d\n", sBlk.no_guids);
1721 }
1722
1723 TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
1724 TRACE("sBlk.s.directory_table_start 0x%llx\n",
1725 sBlk.s.directory_table_start);
1726
1727 if(sBlk.s.s_major > 1)
1728 TRACE("sBlk.s.fragment_table_start 0x%llx\n\n",
1729 sBlk.s.fragment_table_start);
1730
1731 if(sBlk.s.s_major > 2)
1732 TRACE("sBlk.s.lookup_table_start 0x%llx\n\n",
1733 sBlk.s.lookup_table_start);
1734
1735 if(sBlk.s.s_major == 4) {
1736 TRACE("sBlk.s.id_table_start 0x%llx\n", sBlk.s.id_table_start);
1737 TRACE("sBlk.s.xattr_id_table_start 0x%llx\n",
1738 sBlk.s.xattr_id_table_start);
1739 } else {
1740 TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
1741 TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
1742 }
1743 }
1744
1745
1746 int check_compression(struct compressor *comp)
1747 {
1748 int res, bytes = 0;
1749 char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1750
1751 if(!comp->supported) {
1752 ERROR("Filesystem uses %s compression, this is "
1753 "unsupported by this version\n", comp->name);
1754 ERROR("Decompressors available:\n");
1755 display_compressors("", "");
1756 return 0;
1757 }
1758
1759 /*
1760 * Read compression options from disk if present, and pass to
1761 * the compressor to ensure we know how to decompress a filesystem
1762 * compressed with these compression options.
1763 *
1764	 * Note, even if there are no compression options we still call the
1765 * compressor because some compression options may be mandatory
1766 * for some compressors.
1767 */
1768 if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1769 bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1770 if(bytes == 0) {
1771 ERROR("Failed to read compressor options\n");
1772 return 0;
1773 }
1774 }
1775
1776 res = compressor_check_options(comp, sBlk.s.block_size, buffer, bytes);
1777
1778 return res != -1;
1779 }
1780
1781
1782 int read_super(char *source)
1783 {
1784 squashfs_super_block_3 sBlk_3;
1785 struct squashfs_super_block sBlk_4;
1786
1787 /*
1788 * Try to read a Squashfs 4 superblock
1789 */
1790 read_fs_bytes(fd, SQUASHFS_START, sizeof(struct squashfs_super_block),
1791 &sBlk_4);
1792 swap = sBlk_4.s_magic != SQUASHFS_MAGIC;
1793 SQUASHFS_INSWAP_SUPER_BLOCK(&sBlk_4);
1794
1795 if(sBlk_4.s_magic == SQUASHFS_MAGIC && sBlk_4.s_major == 4 &&
1796 sBlk_4.s_minor == 0) {
1797 s_ops.squashfs_opendir = squashfs_opendir_4;
1798 s_ops.read_fragment = read_fragment_4;
1799 s_ops.read_fragment_table = read_fragment_table_4;
1800 s_ops.read_block_list = read_block_list_2;
1801 s_ops.read_inode = read_inode_4;
1802 s_ops.read_uids_guids = read_uids_guids_4;
1803 memcpy(&sBlk, &sBlk_4, sizeof(sBlk_4));
1804
1805 /*
1806 * Check the compression type
1807 */
1808 comp = lookup_compressor_id(sBlk.s.compression);
1809 return TRUE;
1810 }
1811
1812 /*
1813 * Not a Squashfs 4 superblock, try to read a squashfs 3 superblock
1814 * (compatible with 1 and 2 filesystems)
1815 */
1816 read_fs_bytes(fd, SQUASHFS_START, sizeof(squashfs_super_block_3),
1817 &sBlk_3);
1818
1819 /*
1820 * Check it is a SQUASHFS superblock
1821 */
1822 swap = 0;
1823 if(sBlk_3.s_magic != SQUASHFS_MAGIC) {
1824 if(sBlk_3.s_magic == SQUASHFS_MAGIC_SWAP) {
1825 squashfs_super_block_3 sblk;
1826 ERROR("Reading a different endian SQUASHFS filesystem "
1827 "on %s\n", source);
1828 SQUASHFS_SWAP_SUPER_BLOCK_3(&sblk, &sBlk_3);
1829 memcpy(&sBlk_3, &sblk, sizeof(squashfs_super_block_3));
1830 swap = 1;
1831 } else {
1832 ERROR("Can't find a SQUASHFS superblock on %s\n",
1833 source);
1834 goto failed_mount;
1835 }
1836 }
1837
1838 sBlk.s.s_magic = sBlk_3.s_magic;
1839 sBlk.s.inodes = sBlk_3.inodes;
1840 sBlk.s.mkfs_time = sBlk_3.mkfs_time;
1841 sBlk.s.block_size = sBlk_3.block_size;
1842 sBlk.s.fragments = sBlk_3.fragments;
1843 sBlk.s.block_log = sBlk_3.block_log;
1844 sBlk.s.flags = sBlk_3.flags;
1845 sBlk.s.s_major = sBlk_3.s_major;
1846 sBlk.s.s_minor = sBlk_3.s_minor;
1847 sBlk.s.root_inode = sBlk_3.root_inode;
1848 sBlk.s.bytes_used = sBlk_3.bytes_used;
1849 sBlk.s.inode_table_start = sBlk_3.inode_table_start;
1850 sBlk.s.directory_table_start = sBlk_3.directory_table_start;
1851 sBlk.s.fragment_table_start = sBlk_3.fragment_table_start;
1852 sBlk.s.lookup_table_start = sBlk_3.lookup_table_start;
1853 sBlk.no_uids = sBlk_3.no_uids;
1854 sBlk.no_guids = sBlk_3.no_guids;
1855 sBlk.uid_start = sBlk_3.uid_start;
1856 sBlk.guid_start = sBlk_3.guid_start;
1857 sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
1858
1859 /* Check the MAJOR & MINOR versions */
1860 if(sBlk.s.s_major == 1 || sBlk.s.s_major == 2) {
1861 sBlk.s.bytes_used = sBlk_3.bytes_used_2;
1862 sBlk.uid_start = sBlk_3.uid_start_2;
1863 sBlk.guid_start = sBlk_3.guid_start_2;
1864 sBlk.s.inode_table_start = sBlk_3.inode_table_start_2;
1865 sBlk.s.directory_table_start = sBlk_3.directory_table_start_2;
1866
1867 if(sBlk.s.s_major == 1) {
1868 sBlk.s.block_size = sBlk_3.block_size_1;
1869 sBlk.s.fragment_table_start = sBlk.uid_start;
1870 s_ops.squashfs_opendir = squashfs_opendir_1;
1871 s_ops.read_fragment_table = read_fragment_table_1;
1872 s_ops.read_block_list = read_block_list_1;
1873 s_ops.read_inode = read_inode_1;
1874 s_ops.read_uids_guids = read_uids_guids_1;
1875 } else {
1876 sBlk.s.fragment_table_start =
1877 sBlk_3.fragment_table_start_2;
1878 s_ops.squashfs_opendir = squashfs_opendir_1;
1879 s_ops.read_fragment = read_fragment_2;
1880 s_ops.read_fragment_table = read_fragment_table_2;
1881 s_ops.read_block_list = read_block_list_2;
1882 s_ops.read_inode = read_inode_2;
1883 s_ops.read_uids_guids = read_uids_guids_1;
1884 }
1885 } else if(sBlk.s.s_major == 3) {
1886 s_ops.squashfs_opendir = squashfs_opendir_3;
1887 s_ops.read_fragment = read_fragment_3;
1888 s_ops.read_fragment_table = read_fragment_table_3;
1889 s_ops.read_block_list = read_block_list_2;
1890 s_ops.read_inode = read_inode_3;
1891 s_ops.read_uids_guids = read_uids_guids_1;
1892 } else {
1893 ERROR("Filesystem on %s is (%d:%d), ", source, sBlk.s.s_major,
1894 sBlk.s.s_minor);
1895 ERROR("which is a later filesystem version than I support!\n");
1896 goto failed_mount;
1897 }
1898
1899 /*
1900 * 1.x, 2.x and 3.x filesystems use gzip compression.
1901 */
1902 comp = lookup_compressor("gzip");
1903 return TRUE;
1904
1905 failed_mount:
1906 return FALSE;
1907 }
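
/*
 * Note (added for clarity): read_super() first interprets the bytes at
 * SQUASHFS_START as a 4.0 superblock; anything else is re-read using the
 * squashfs 3 superblock layout (which also covers 1.x and 2.x), byte
 * swapped if the swapped magic is found, copied field by field into sBlk,
 * and the matching s_ops readers are selected.  1.x to 3.x filesystems
 * always use gzip compression.
 */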
1908
1909
1910 struct pathname *process_extract_files(struct pathname *path, char *filename)
1911 {
1912 FILE *fd;
1913 char buffer[MAX_LINE + 1]; /* overflow safe */
1914 char *name;
1915
1916 fd = fopen(filename, "r");
1917 if(fd == NULL)
1918 EXIT_UNSQUASH("Failed to open extract file \"%s\" because %s\n",
1919 filename, strerror(errno));
1920
1921 while(fgets(name = buffer, MAX_LINE + 1, fd) != NULL) {
1922 int len = strlen(name);
1923
1924 if(len == MAX_LINE && name[len - 1] != '\n')
1925 /* line too large */
1926 EXIT_UNSQUASH("Line too long when reading "
1927 "extract file \"%s\", larger than %d "
1928 "bytes\n", filename, MAX_LINE);
1929
1930 /*
1931 * Remove '\n' terminator if it exists (the last line
1932 * in the file may not be '\n' terminated)
1933 */
1934 if(len && name[len - 1] == '\n')
1935 name[len - 1] = '\0';
1936
1937 /* Skip any leading whitespace */
1938 while(isspace(*name))
1939 name ++;
1940
1941 /* if comment line, skip */
1942 if(*name == '#')
1943 continue;
1944
1945 /* check for initial backslash, to accommodate
1946 * filenames with leading space or leading # character
1947 */
1948 if(*name == '\\')
1949 name ++;
1950
1951 /* if line is now empty after skipping characters, skip it */
1952 if(*name == '\0')
1953 continue;
1954
1955 path = add_path(path, name, name);
1956 }
1957
1958 if(ferror(fd))
1959 EXIT_UNSQUASH("Reading extract file \"%s\" failed because %s\n",
1960 filename, strerror(errno));
1961
1962 fclose(fd);
1963 return path;
1964 }
1965
1966
1967 /*
1968 * reader thread. This thread processes read requests queued by the
1969 * cache_get() routine.
1970 */
1971 void *reader(void *arg)
1972 {
1973 while(1) {
1974 struct cache_entry *entry = queue_get(to_reader);
1975 int res = read_fs_bytes(fd, entry->block,
1976 SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size),
1977 entry->data);
1978
1979 if(res && SQUASHFS_COMPRESSED_BLOCK(entry->size))
1980 /*
1981 * queue successfully read block to the inflate
1982 * thread(s) for further processing
1983 */
1984 queue_put(to_inflate, entry);
1985 else
1986 /*
1987 * block has either been successfully read and is
1988 * uncompressed, or an error has occurred, clear pending
1989 * flag, set error appropriately, and wake up any
1990 * threads waiting on this buffer
1991 */
1992 cache_block_ready(entry, !res);
1993 }
1994 }
1995
1996
1997 /*
1998 * writer thread. This processes file write requests queued by the
1999 * write_file() routine.
2000 */
2001 void *writer(void *arg)
2002 {
2003 int i;
2004
2005 while(1) {
2006 struct squashfs_file *file = queue_get(to_writer);
2007 int file_fd;
2008 long long hole = 0;
2009 int failed = FALSE;
2010 int error;
2011
2012 if(file == NULL) {
2013 queue_put(from_writer, NULL);
2014 continue;
2015 } else if(file->fd == -1) {
2016 /* write attributes for directory file->pathname */
2017 set_attributes(file->pathname, file->mode, file->uid,
2018 file->gid, file->time, file->xattr, TRUE);
2019 free(file->pathname);
2020 free(file);
2021 continue;
2022 }
2023
2024 TRACE("writer: regular file, blocks %d\n", file->blocks);
2025
2026 file_fd = file->fd;
2027
2028 for(i = 0; i < file->blocks; i++, cur_blocks ++) {
2029 struct file_entry *block = queue_get(to_writer);
2030
2031 if(block->buffer == 0) { /* sparse file */
2032 hole += block->size;
2033 free(block);
2034 continue;
2035 }
2036
2037 cache_block_wait(block->buffer);
2038
2039 if(block->buffer->error)
2040 failed = TRUE;
2041
2042 if(failed)
2043 continue;
2044
2045 error = write_block(file_fd, block->buffer->data +
2046 block->offset, block->size, hole, file->sparse);
2047
2048 if(error == FALSE) {
2049 ERROR("writer: failed to write data block %d\n",
2050 i);
2051 failed = TRUE;
2052 }
2053
2054 hole = 0;
2055 cache_block_put(block->buffer);
2056 free(block);
2057 }
2058
2059 if(hole && failed == FALSE) {
2060 /*
2061 * corner case for hole extending to end of file
2062 */
2063 if(file->sparse == FALSE ||
2064 lseek(file_fd, hole, SEEK_CUR) == -1) {
2065 /*
2066 * for files which we don't want to write
2067 * sparsely, or for broken lseeks which cannot
2068 * seek beyond end of file, write_block will do
2069 * the right thing
2070 */
2071 hole --;
2072 if(write_block(file_fd, "\0", 1, hole,
2073 file->sparse) == FALSE) {
2074 ERROR("writer: failed to write sparse "
2075 "data block\n");
2076 failed = TRUE;
2077 }
2078 } else if(ftruncate(file_fd, file->file_size) == -1) {
2079 ERROR("writer: failed to write sparse data "
2080 "block\n");
2081 failed = TRUE;
2082 }
2083 }
2084
2085 close_wake(file_fd);
2086 if(failed == FALSE)
2087 set_attributes(file->pathname, file->mode, file->uid,
2088 file->gid, file->time, file->xattr, force);
2089 else {
2090 ERROR("Failed to write %s, skipping\n", file->pathname);
2091 unlink(file->pathname);
2092 }
2093 free(file->pathname);
2094 free(file);
2095
2096 }
2097 }
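/*
 * A minimal sketch (not part of the build) of the trailing-hole handling
 * performed above.  A hole in the middle of a file is handled lazily by
 * passing the accumulated "hole" offset to the next write_block() call;
 * a hole that reaches the end of the file has nothing written after it,
 * so it is materialised explicitly, either by setting the file size with
 * ftruncate() (sparse output) or by padding and writing a final zero
 * byte (non-sparse output, or where lseek() past end of file fails):
 *
 *     if(sparse && lseek(fd, hole, SEEK_CUR) != -1)
 *         ftruncate(fd, file_size);                    // hole stays unallocated
 *     else
 *         write_block(fd, "\0", 1, hole - 1, sparse);  // pad, then one 0 byte
 */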
2098
2099
2100 /*
2101 * decompress thread. This decompresses buffers queued by the read thread
2102 */
2103 void *inflator(void *arg)
2104 {
2105 char tmp[block_size];
2106
2107 while(1) {
2108 struct cache_entry *entry = queue_get(to_inflate);
2109 int error, res;
2110
2111 res = compressor_uncompress(comp, tmp, entry->data,
2112 SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size), block_size,
2113 &error);
2114
2115 if(res == -1)
2116 ERROR("%s uncompress failed with error code %d\n",
2117 comp->name, error);
2118 else
2119 memcpy(entry->data, tmp, res);
2120
2121 /*
2122 * block has been either successfully decompressed, or an error
2123 * occurred, clear pending flag, set error appropriately and
2124 * wake up any threads waiting on this block
2125 */
2126 cache_block_ready(entry, res == -1);
2127 }
2128 }
2129
2130
2131 void *progress_thread(void *arg)
2132 {
2133 struct timespec requested_time, remaining;
2134 struct itimerval itimerval;
2135 struct winsize winsize;
2136
2137 	if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &winsize) == -1) {
2138 if(isatty(STDOUT_FILENO))
2139 ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
2140 "columns\n");
2141 columns = 80;
2142 } else
2143 columns = winsize.ws_col;
2144 signal(SIGWINCH, sigwinch_handler);
2145 signal(SIGALRM, sigalrm_handler);
2146
2147 itimerval.it_value.tv_sec = 0;
2148 itimerval.it_value.tv_usec = 250000;
2149 itimerval.it_interval.tv_sec = 0;
2150 itimerval.it_interval.tv_usec = 250000;
2151 setitimer(ITIMER_REAL, &itimerval, NULL);
2152
2153 requested_time.tv_sec = 0;
2154 requested_time.tv_nsec = 250000000;
2155
2156 while(1) {
2157 int res = nanosleep(&requested_time, &remaining);
2158
2159 if(res == -1 && errno != EINTR)
2160 EXIT_UNSQUASH("nanosleep failed in progress thread\n");
2161
2162 if(progress_enabled) {
2163 pthread_mutex_lock(&screen_mutex);
2164 progress_bar(sym_count + dev_count +
2165 fifo_count + cur_blocks, total_inodes -
2166 total_files + total_blocks, columns);
2167 pthread_mutex_unlock(&screen_mutex);
2168 }
2169 }
2170 }
2171
2172
2173 void initialise_threads(int fragment_buffer_size, int data_buffer_size)
2174 {
2175 struct rlimit rlim;
2176 int i, max_files, res;
2177 sigset_t sigmask, old_mask;
2178
2179 /* block SIGQUIT and SIGHUP, these are handled by the info thread */
2180 sigemptyset(&sigmask);
2181 sigaddset(&sigmask, SIGQUIT);
2182 sigaddset(&sigmask, SIGHUP);
2183 sigaddset(&sigmask, SIGALRM);
2184 if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1)
2185 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2186 "\n");
2187
2188 /*
2189 * temporarily block these signals so the created sub-threads will
2190 * ignore them, ensuring the main thread handles them
2191 */
2192 sigemptyset(&sigmask);
2193 sigaddset(&sigmask, SIGINT);
2194 sigaddset(&sigmask, SIGTERM);
2195 if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) == -1)
2196 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2197 "\n");
2198
2199 if(processors == -1) {
2200 #ifndef linux
2201 int mib[2];
2202 size_t len = sizeof(processors);
2203
2204 mib[0] = CTL_HW;
2205 #ifdef HW_AVAILCPU
2206 mib[1] = HW_AVAILCPU;
2207 #else
2208 mib[1] = HW_NCPU;
2209 #endif
2210
2211 if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
2212 ERROR("Failed to get number of available processors. "
2213 "Defaulting to 1\n");
2214 processors = 1;
2215 }
2216 #else
2217 processors = sysconf(_SC_NPROCESSORS_ONLN);
2218 #endif
2219 }
2220
2221 if(add_overflow(processors, 3) ||
2222 multiply_overflow(processors + 3, sizeof(pthread_t)))
2223 EXIT_UNSQUASH("Processors too large\n");
2224
2225 thread = malloc((3 + processors) * sizeof(pthread_t));
2226 if(thread == NULL)
2227 EXIT_UNSQUASH("Out of memory allocating thread descriptors\n");
2228 inflator_thread = &thread[3];
2229
2230 /*
2231 * dimensioning the to_reader and to_inflate queues. The size of
2232 * these queues is directly related to the amount of block
2233 * read-ahead possible. To_reader queues block read requests to
2234 * the reader thread and to_inflate queues block decompression
2235 * requests to the inflate thread(s) (once the block has been read by
2236 * the reader thread). The amount of read-ahead is determined by
2237 * the combined size of the data_block and fragment caches which
2238 * determine the total number of blocks which can be "in flight"
2239 * at any one time (either being read or being decompressed)
2240 *
2241 * The maximum file open limit, however, affects the read-ahead
2242 * possible, in that for normal sizes of the fragment and data block
2243 * caches, where the incoming files have few data blocks or one fragment
2244 * only, the file open limit is likely to be reached before the
2245 * caches are full. This means the worst case sizing of the combined
2246 	 * sizes of the caches is unlikely to ever be necessary. However, it is
2247 	 * obvious that read-ahead up to the data block cache size is always possible
2248 * irrespective of the file open limit, because a single file could
2249 * contain that number of blocks.
2250 *
2251 * Choosing the size as "file open limit + data block cache size" seems
2252 * to be a reasonable estimate. We can reasonably assume the maximum
2253 * likely read-ahead possible is data block cache size + one fragment
2254 * per open file.
2255 *
2256 * dimensioning the to_writer queue. The size of this queue is
2257 * directly related to the amount of block read-ahead possible.
2258 * However, unlike the to_reader and to_inflate queues, this is
2259 * complicated by the fact the to_writer queue not only contains
2260 * entries for fragments and data_blocks but it also contains
2261 * file entries, one per open file in the read-ahead.
2262 *
2263 * Choosing the size as "2 * (file open limit) +
2264 * data block cache size" seems to be a reasonable estimate.
2265 * We can reasonably assume the maximum likely read-ahead possible
2266 * is data block cache size + one fragment per open file, and then
2267 * we will have a file_entry for each open file.
2268 */
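	/*
	 * Purely illustrative numbers for the sizing described above: with a
	 * hypothetical max_files of 1024 and a data cache of 256 blocks,
	 * to_reader and to_inflate would each hold 1024 + 256 = 1280 entries,
	 * and to_writer 2 * 1024 + 256 = 2304 entries.
	 */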
2269 res = getrlimit(RLIMIT_NOFILE, &rlim);
2270 if (res == -1) {
2271 ERROR("failed to get open file limit! Defaulting to 1\n");
2272 rlim.rlim_cur = 1;
2273 }
2274
2275 if (rlim.rlim_cur != RLIM_INFINITY) {
2276 /*
2277 		 * leave OPEN_FILE_MARGIN free (rlim_cur includes fds used by
2278 		 * stdin, stdout, stderr and the filesystem fd)
2279 */
2280 if (rlim.rlim_cur <= OPEN_FILE_MARGIN)
2281 /* no margin, use minimum possible */
2282 max_files = 1;
2283 else
2284 max_files = rlim.rlim_cur - OPEN_FILE_MARGIN;
2285 } else
2286 max_files = -1;
2287
2288 /* set amount of available files for use by open_wait and close_wake */
2289 open_init(max_files);
2290
2291 /*
2292 * allocate to_reader, to_inflate and to_writer queues. Set based on
2293 * open file limit and cache size, unless open file limit is unlimited,
2294 * in which case set purely based on cache limits
2295 *
2296 * In doing so, check that the user supplied values do not overflow
2297 * a signed int
2298 */
2299 if (max_files != -1) {
2300 if(add_overflow(data_buffer_size, max_files) ||
2301 add_overflow(data_buffer_size, max_files * 2))
2302 EXIT_UNSQUASH("Data queue size is too large\n");
2303
2304 to_reader = queue_init(max_files + data_buffer_size);
2305 to_inflate = queue_init(max_files + data_buffer_size);
2306 to_writer = queue_init(max_files * 2 + data_buffer_size);
2307 } else {
2308 int all_buffers_size;
2309
2310 if(add_overflow(fragment_buffer_size, data_buffer_size))
2311 EXIT_UNSQUASH("Data and fragment queues combined are"
2312 " too large\n");
2313
2314 all_buffers_size = fragment_buffer_size + data_buffer_size;
2315
2316 if(add_overflow(all_buffers_size, all_buffers_size))
2317 EXIT_UNSQUASH("Data and fragment queues combined are"
2318 " too large\n");
2319
2320 to_reader = queue_init(all_buffers_size);
2321 to_inflate = queue_init(all_buffers_size);
2322 to_writer = queue_init(all_buffers_size * 2);
2323 }
2324
2325 from_writer = queue_init(1);
2326
2327 fragment_cache = cache_init(block_size, fragment_buffer_size);
2328 data_cache = cache_init(block_size, data_buffer_size);
2329 pthread_create(&thread[0], NULL, reader, NULL);
2330 pthread_create(&thread[1], NULL, writer, NULL);
2331 pthread_create(&thread[2], NULL, progress_thread, NULL);
2332 init_info();
2333 pthread_mutex_init(&fragment_mutex, NULL);
2334
2335 for(i = 0; i < processors; i++) {
2336 if(pthread_create(&inflator_thread[i], NULL, inflator, NULL) !=
2337 0)
2338 EXIT_UNSQUASH("Failed to create thread\n");
2339 }
2340
2341 printf("Parallel unsquashfs: Using %d processor%s\n", processors,
2342 processors == 1 ? "" : "s");
2343
2344 if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) == -1)
2345 EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2346 "\n");
2347 }
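/*
 * The signal handling pattern used above, in outline (illustrative
 * fragment only): a new thread inherits the signal mask of the thread
 * that creates it, so the signals that must only ever be delivered to
 * the main thread are blocked before any pthread_create(), and the
 * original mask is restored once all of the workers exist:
 *
 *     pthread_sigmask(SIG_BLOCK, &mask, &old_mask);  // workers inherit this mask
 *     pthread_create(...);                           // reader/writer/inflator/...
 *     pthread_sigmask(SIG_SETMASK, &old_mask, NULL); // main thread handles them again
 */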
2348
2349
2350 void enable_progress_bar()
2351 {
2352 pthread_mutex_lock(&screen_mutex);
2353 progress_enabled = progress;
2354 pthread_mutex_unlock(&screen_mutex);
2355 }
2356
2357
2358 void disable_progress_bar()
2359 {
2360 pthread_mutex_lock(&screen_mutex);
2361 if(progress_enabled) {
2362 progress_bar(sym_count + dev_count + fifo_count + cur_blocks,
2363 total_inodes - total_files + total_blocks, columns);
2364 printf("\n");
2365 }
2366 progress_enabled = FALSE;
2367 pthread_mutex_unlock(&screen_mutex);
2368 }
2369
2370
2371 void progressbar_error(char *fmt, ...)
2372 {
2373 va_list ap;
2374
2375 pthread_mutex_lock(&screen_mutex);
2376
2377 if(progress_enabled)
2378 fprintf(stderr, "\n");
2379
2380 va_start(ap, fmt);
2381 vfprintf(stderr, fmt, ap);
2382 va_end(ap);
2383
2384 pthread_mutex_unlock(&screen_mutex);
2385 }
2386
2387
2388 void progressbar_info(char *fmt, ...)
2389 {
2390 va_list ap;
2391
2392 pthread_mutex_lock(&screen_mutex);
2393
2394 if(progress_enabled)
2395 printf("\n");
2396
2397 va_start(ap, fmt);
2398 vprintf(fmt, ap);
2399 va_end(ap);
2400
2401 pthread_mutex_unlock(&screen_mutex);
2402 }
2403
2404 void progress_bar(long long current, long long max, int columns)
2405 {
2406 char rotate_list[] = { '|', '/', '-', '\\' };
2407 int max_digits, used, hashes, spaces;
2408 static int tty = -1;
2409
2410 if(max == 0)
2411 return;
2412
2413 max_digits = floor(log10(max)) + 1;
2414 used = max_digits * 2 + 11;
2415 hashes = (current * (columns - used)) / max;
2416 spaces = columns - used - hashes;
2417
2418 if((current > max) || (columns - used < 0))
2419 return;
2420
2421 if(tty == -1)
2422 tty = isatty(STDOUT_FILENO);
2423 if(!tty) {
2424 static long long previous = -1;
2425
2426 /*
2427 * Updating much more frequently than this results in huge
2428 * log files.
2429 */
2430 if((current % 100) != 0 && current != max)
2431 return;
2432 /* Don't update just to rotate the spinner. */
2433 if(current == previous)
2434 return;
2435 previous = current;
2436 }
2437
2438 printf("\r[");
2439
2440 while (hashes --)
2441 putchar('=');
2442
2443 putchar(rotate_list[rotate]);
2444
2445 while(spaces --)
2446 putchar(' ');
2447
2448 printf("] %*lld/%*lld", max_digits, current, max_digits, max);
2449 printf(" %3lld%%", current * 100 / max);
2450 fflush(stdout);
2451 }
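/*
 * Worked example of the bar geometry above (illustrative values): with
 * max = 1000 and columns = 80, max_digits is 4 and used is
 * 4 * 2 + 11 = 19 columns for the brackets, counts and percentage,
 * leaving 61 columns for the bar.  At current = 500 this prints
 * hashes = (500 * 61) / 1000 = 30 '=' characters, followed by the
 * spinner and enough spaces to fill the remaining width.
 */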
2452
2453
2454 int parse_number(char *arg, int *res)
2455 {
2456 char *b;
2457 long number = strtol(arg, &b, 10);
2458
2459 /* check for trailing junk after number */
2460 if(*b != '\0')
2461 return 0;
2462
2463 /*
2464 * check for strtol underflow or overflow in conversion.
2465 * Note: strtol can validly return LONG_MIN and LONG_MAX
2466 * if the user entered these values, but, additional code
2467 * to distinguish this scenario is unnecessary, because for
2468 * our purposes LONG_MIN and LONG_MAX are too large anyway
2469 */
2470 if(number == LONG_MIN || number == LONG_MAX)
2471 return 0;
2472
2473 /* reject negative numbers as invalid */
2474 if(number < 0)
2475 return 0;
2476
2477 /* check if long result will overflow signed int */
2478 if(number > INT_MAX)
2479 return 0;
2480
2481 *res = number;
2482 return 1;
2483 }
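/*
 * Example behaviour of parse_number() (illustrative calls only):
 *
 *     int n;
 *     parse_number("64", &n);          // returns 1, n == 64
 *     parse_number("64k", &n);         // returns 0, trailing junk
 *     parse_number("-3", &n);          // returns 0, negative rejected
 *     parse_number("9999999999", &n);  // returns 0, larger than INT_MAX
 */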
2484
2485
2486 #define VERSION() \
2487 printf("unsquashfs version 4.3 (2014/05/12)\n");\
2488 printf("copyright (C) 2014 Phillip Lougher "\
2489 "<[email protected]>\n\n");\
2490 printf("This program is free software; you can redistribute it and/or"\
2491 "\n");\
2492 printf("modify it under the terms of the GNU General Public License"\
2493 "\n");\
2494 printf("as published by the Free Software Foundation; either version "\
2495 "2,\n");\
2496 printf("or (at your option) any later version.\n\n");\
2497 printf("This program is distributed in the hope that it will be "\
2498 "useful,\n");\
2499 printf("but WITHOUT ANY WARRANTY; without even the implied warranty of"\
2500 "\n");\
2501 printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"\
2502 "\n");\
2503 printf("GNU General Public License for more details.\n");
2504 int main(int argc, char *argv[])
2505 {
2506 char *dest = "squashfs-root";
2507 int i, stat_sys = FALSE, version = FALSE;
2508 int n;
2509 struct pathnames *paths = NULL;
2510 struct pathname *path = NULL;
2511 long long directory_table_end;
2512 int fragment_buffer_size = FRAGMENT_BUFFER_DEFAULT;
2513 int data_buffer_size = DATA_BUFFER_DEFAULT;
2514
2515 pthread_mutex_init(&screen_mutex, NULL);
2516 root_process = geteuid() == 0;
2517 if(root_process)
2518 umask(0);
2519
2520 for(i = 1; i < argc; i++) {
2521 if(*argv[i] != '-')
2522 break;
2523 if(strcmp(argv[i], "-version") == 0 ||
2524 strcmp(argv[i], "-v") == 0) {
2525 VERSION();
2526 version = TRUE;
2527 } else if(strcmp(argv[i], "-info") == 0 ||
2528 strcmp(argv[i], "-i") == 0)
2529 info = TRUE;
2530 else if(strcmp(argv[i], "-ls") == 0 ||
2531 strcmp(argv[i], "-l") == 0)
2532 lsonly = TRUE;
2533 else if(strcmp(argv[i], "-no-progress") == 0 ||
2534 strcmp(argv[i], "-n") == 0)
2535 progress = FALSE;
2536 else if(strcmp(argv[i], "-no-xattrs") == 0 ||
2537 strcmp(argv[i], "-no") == 0)
2538 no_xattrs = TRUE;
2539 else if(strcmp(argv[i], "-xattrs") == 0 ||
2540 strcmp(argv[i], "-x") == 0)
2541 no_xattrs = FALSE;
2542 else if(strcmp(argv[i], "-user-xattrs") == 0 ||
2543 strcmp(argv[i], "-u") == 0) {
2544 user_xattrs = TRUE;
2545 no_xattrs = FALSE;
2546 } else if(strcmp(argv[i], "-dest") == 0 ||
2547 strcmp(argv[i], "-d") == 0) {
2548 if(++i == argc) {
2549 fprintf(stderr, "%s: -dest missing filename\n",
2550 argv[0]);
2551 exit(1);
2552 }
2553 dest = argv[i];
2554 } else if(strcmp(argv[i], "-processors") == 0 ||
2555 strcmp(argv[i], "-p") == 0) {
2556 if((++i == argc) ||
2557 !parse_number(argv[i],
2558 &processors)) {
2559 ERROR("%s: -processors missing or invalid "
2560 "processor number\n", argv[0]);
2561 exit(1);
2562 }
2563 if(processors < 1) {
2564 ERROR("%s: -processors should be 1 or larger\n",
2565 argv[0]);
2566 exit(1);
2567 }
2568 } else if(strcmp(argv[i], "-data-queue") == 0 ||
2569 strcmp(argv[i], "-da") == 0) {
2570 if((++i == argc) ||
2571 !parse_number(argv[i],
2572 &data_buffer_size)) {
2573 ERROR("%s: -data-queue missing or invalid "
2574 "queue size\n", argv[0]);
2575 exit(1);
2576 }
2577 if(data_buffer_size < 1) {
2578 ERROR("%s: -data-queue should be 1 Mbyte or "
2579 "larger\n", argv[0]);
2580 exit(1);
2581 }
2582 } else if(strcmp(argv[i], "-frag-queue") == 0 ||
2583 strcmp(argv[i], "-fr") == 0) {
2584 if((++i == argc) ||
2585 !parse_number(argv[i],
2586 &fragment_buffer_size)) {
2587 ERROR("%s: -frag-queue missing or invalid "
2588 "queue size\n", argv[0]);
2589 exit(1);
2590 }
2591 if(fragment_buffer_size < 1) {
2592 ERROR("%s: -frag-queue should be 1 Mbyte or "
2593 "larger\n", argv[0]);
2594 exit(1);
2595 }
2596 } else if(strcmp(argv[i], "-force") == 0 ||
2597 strcmp(argv[i], "-f") == 0)
2598 force = TRUE;
2599 else if(strcmp(argv[i], "-stat") == 0 ||
2600 strcmp(argv[i], "-s") == 0)
2601 stat_sys = TRUE;
2602 else if(strcmp(argv[i], "-lls") == 0 ||
2603 strcmp(argv[i], "-ll") == 0) {
2604 lsonly = TRUE;
2605 short_ls = FALSE;
2606 } else if(strcmp(argv[i], "-linfo") == 0 ||
2607 strcmp(argv[i], "-li") == 0) {
2608 info = TRUE;
2609 short_ls = FALSE;
2610 } else if(strcmp(argv[i], "-ef") == 0 ||
2611 strcmp(argv[i], "-e") == 0) {
2612 if(++i == argc) {
2613 fprintf(stderr, "%s: -ef missing filename\n",
2614 argv[0]);
2615 exit(1);
2616 }
2617 path = process_extract_files(path, argv[i]);
2618 } else if(strcmp(argv[i], "-regex") == 0 ||
2619 strcmp(argv[i], "-r") == 0)
2620 use_regex = TRUE;
2621 else
2622 goto options;
2623 }
2624
2625 if(lsonly || info)
2626 progress = FALSE;
2627
2628 #ifdef SQUASHFS_TRACE
2629 /*
2630 * Disable progress bar if full debug tracing is enabled.
2631 * The progress bar in this case just gets in the way of the
2632 * debug trace output
2633 */
2634 progress = FALSE;
2635 #endif
2636
2637 if(i == argc) {
2638 if(!version) {
2639 options:
2640 ERROR("SYNTAX: %s [options] filesystem [directories or "
2641 "files to extract]\n", argv[0]);
2642 ERROR("\t-v[ersion]\t\tprint version, licence and "
2643 "copyright information\n");
2644 ERROR("\t-d[est] <pathname>\tunsquash to <pathname>, "
2645 "default \"squashfs-root\"\n");
2646 ERROR("\t-n[o-progress]\t\tdon't display the progress "
2647 "bar\n");
2648 ERROR("\t-no[-xattrs]\t\tdon't extract xattrs in file system"
2649 NOXOPT_STR"\n");
2650 ERROR("\t-x[attrs]\t\textract xattrs in file system"
2651 XOPT_STR "\n");
2652 ERROR("\t-u[ser-xattrs]\t\tonly extract user xattrs in "
2653 "file system.\n\t\t\t\tEnables extracting "
2654 "xattrs\n");
2655 ERROR("\t-p[rocessors] <number>\tuse <number> "
2656 "processors. By default will use\n");
2657 ERROR("\t\t\t\tnumber of processors available\n");
2658 ERROR("\t-i[nfo]\t\t\tprint files as they are "
2659 "unsquashed\n");
2660 ERROR("\t-li[nfo]\t\tprint files as they are "
2661 "unsquashed with file\n");
2662 ERROR("\t\t\t\tattributes (like ls -l output)\n");
2663 ERROR("\t-l[s]\t\t\tlist filesystem, but don't unsquash"
2664 "\n");
2665 ERROR("\t-ll[s]\t\t\tlist filesystem with file "
2666 "attributes (like\n");
2667 ERROR("\t\t\t\tls -l output), but don't unsquash\n");
2668 ERROR("\t-f[orce]\t\tif file already exists then "
2669 "overwrite\n");
2670 ERROR("\t-s[tat]\t\t\tdisplay filesystem superblock "
2671 "information\n");
2672 ERROR("\t-e[f] <extract file>\tlist of directories or "
2673 "files to extract.\n\t\t\t\tOne per line\n");
2674 ERROR("\t-da[ta-queue] <size>\tSet data queue to "
2675 "<size> Mbytes. Default %d\n\t\t\t\tMbytes\n",
2676 DATA_BUFFER_DEFAULT);
2677 ERROR("\t-fr[ag-queue] <size>\tSet fragment queue to "
2678 "<size> Mbytes. Default\n\t\t\t\t%d Mbytes\n",
2679 FRAGMENT_BUFFER_DEFAULT);
2680 ERROR("\t-r[egex]\t\ttreat extract names as POSIX "
2681 "regular expressions\n");
2682 ERROR("\t\t\t\trather than use the default shell "
2683 "wildcard\n\t\t\t\texpansion (globbing)\n");
2684 ERROR("\nDecompressors available:\n");
2685 display_compressors("", "");
2686 }
2687 exit(1);
2688 }
2689
2690 for(n = i + 1; n < argc; n++)
2691 path = add_path(path, argv[n], argv[n]);
2692
2693 if((fd = open(argv[i], O_RDONLY)) == -1) {
2694 ERROR("Could not open %s, because %s\n", argv[i],
2695 strerror(errno));
2696 exit(1);
2697 }
2698
2699 if(read_super(argv[i]) == FALSE)
2700 exit(1);
2701
2702 if(stat_sys) {
2703 squashfs_stat(argv[i]);
2704 exit(0);
2705 }
2706
2707 if(!check_compression(comp))
2708 exit(1);
2709
2710 block_size = sBlk.s.block_size;
2711 block_log = sBlk.s.block_log;
2712
2713 /*
2714 * Sanity check block size and block log.
2715 *
2716 * Check they're within correct limits
2717 */
2718 if(block_size > SQUASHFS_FILE_MAX_SIZE ||
2719 block_log > SQUASHFS_FILE_MAX_LOG)
2720 EXIT_UNSQUASH("Block size or block_log too large."
2721 " File system is corrupt.\n");
2722
2723 /*
2724 * Check block_size and block_log match
2725 */
2726 if(block_size != (1 << block_log))
2727 EXIT_UNSQUASH("Block size and block_log do not match."
2728 " File system is corrupt.\n");
2729
2730 /*
2731 * convert from queue size in Mbytes to queue size in
2732 * blocks.
2733 *
2734 * In doing so, check that the user supplied values do not
2735 * overflow a signed int
2736 */
2737 if(shift_overflow(fragment_buffer_size, 20 - block_log))
2738 EXIT_UNSQUASH("Fragment queue size is too large\n");
2739 else
2740 fragment_buffer_size <<= 20 - block_log;
2741
2742 if(shift_overflow(data_buffer_size, 20 - block_log))
2743 EXIT_UNSQUASH("Data queue size is too large\n");
2744 else
2745 data_buffer_size <<= 20 - block_log;
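
	/*
	 * Illustrative conversion: with a 128 Kbyte block size (block_log = 17),
	 * a -data-queue of 256 Mbytes becomes 256 << (20 - 17) = 2048 blocks,
	 * which is the unit the queues and caches in initialise_threads() are
	 * dimensioned in.
	 */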
2746
2747 initialise_threads(fragment_buffer_size, data_buffer_size);
2748
2749 fragment_data = malloc(block_size);
2750 if(fragment_data == NULL)
2751 EXIT_UNSQUASH("failed to allocate fragment_data\n");
2752
2753 file_data = malloc(block_size);
2754 if(file_data == NULL)
2755 		EXIT_UNSQUASH("failed to allocate file_data\n");
2756
2757 data = malloc(block_size);
2758 if(data == NULL)
2759 EXIT_UNSQUASH("failed to allocate data\n");
2760
2761 created_inode = malloc(sBlk.s.inodes * sizeof(char *));
2762 if(created_inode == NULL)
2763 EXIT_UNSQUASH("failed to allocate created_inode\n");
2764
2765 memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2766
2767 if(s_ops.read_uids_guids() == FALSE)
2768 		EXIT_UNSQUASH("failed to read uid/gid table\n");
2769
2770 if(s_ops.read_fragment_table(&directory_table_end) == FALSE)
2771 EXIT_UNSQUASH("failed to read fragment table\n");
2772
2773 if(read_inode_table(sBlk.s.inode_table_start,
2774 sBlk.s.directory_table_start) == FALSE)
2775 EXIT_UNSQUASH("failed to read inode table\n");
2776
2777 if(read_directory_table(sBlk.s.directory_table_start,
2778 directory_table_end) == FALSE)
2779 EXIT_UNSQUASH("failed to read directory table\n");
2780
2781 if(no_xattrs)
2782 sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
2783
2784 if(read_xattrs_from_disk(fd, &sBlk.s) == 0)
2785 EXIT_UNSQUASH("failed to read the xattr table\n");
2786
2787 if(path) {
2788 paths = init_subdir();
2789 paths = add_subdir(paths, path);
2790 }
2791
2792 pre_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2793 SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2794
2795 memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2796 inode_number = 1;
2797
2798 printf("%d inodes (%d blocks) to write\n\n", total_inodes,
2799 total_inodes - total_files + total_blocks);
2800
2801 enable_progress_bar();
2802
2803 dir_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2804 SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2805
2806 queue_put(to_writer, NULL);
2807 queue_get(from_writer);
2808
2809 disable_progress_bar();
2810
2811 if(!lsonly) {
2812 printf("\n");
2813 printf("created %d files\n", file_count);
2814 printf("created %d directories\n", dir_count);
2815 printf("created %d symlinks\n", sym_count);
2816 printf("created %d devices\n", dev_count);
2817 printf("created %d fifos\n", fifo_count);
2818 }
2819
2820 return 0;
2821 }
2822