/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache used for both, plus functions
 * layered on top of the generic cache implementation to access the
 * metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression), the read of a
 * particular piece of metadata or fragment will also retrieve other
 * metadata/fragments packed with it, which because of locality-of-reference
 * may be read in the near future.  Temporarily caching them ensures they are
 * available for near-future access without requiring an additional read and
 * decompress.
 */
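
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original comments): callers look a block up with squashfs_cache_get(),
 * copy what they need out of the returned entry, and then release it with
 * squashfs_cache_put(), e.g.
 *
 *	entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
 *	if (!entry->error)
 *		bytes = squashfs_copy_data(buffer, entry, offset, length);
 *	squashfs_cache_put(entry);
 */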

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zlib.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"

/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		for (i = 0; i < cache->entries; i++)
			if (cache->entry[i].block == block)
				break;

		if (i == cache->entries) {
			/*
			 * Block not in cache; if all cache entries are used,
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise the chosen cache entry, and fill it in
			 * from disk.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(sb, entry->data,
				block, length, &entry->next_index,
				cache->block_size);

			spin_lock(&cache->lock);

			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While filling this entry, one or more other
			 * processes may have looked it up in the cache and
			 * slept waiting for it to become available; wake
			 * them up now.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it; if it was
		 * previously unused there's now one less cache entry
		 * available for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another
		 * process, go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}


/*
 * Release cache entry; once the usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there are any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}

/*
 * Delete cache, reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
	int i, j;

	if (cache == NULL)
		return;

	for (i = 0; i < cache->entries; i++) {
		if (cache->entry[i].data) {
			for (j = 0; j < cache->pages; j++)
				kfree(cache->entry[i].data[j]);
			kfree(cache->entry[i].data);
		}
	}

	kfree(cache->entry);
	kfree(cache);
}


/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		return NULL;
	}

	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
	if (cache->entry == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		goto cleanup;
	}

	cache->next_blk = 0;
	cache->unused = entries;
	cache->entries = entries;
	cache->block_size = block_size;
	cache->pages = block_size >> PAGE_CACHE_SHIFT;
	cache->name = name;
	cache->num_waiters = 0;
	spin_lock_init(&cache->lock);
	init_waitqueue_head(&cache->wait_queue);

	for (i = 0; i < entries; i++) {
		struct squashfs_cache_entry *entry = &cache->entry[i];

		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		entry->block = SQUASHFS_INVALID_BLK;
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}
	}

	return cache;

cleanup:
	squashfs_cache_delete(cache);
	return NULL;
}
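
/*
 * Illustrative sizing note (an assumption for the example, not from the
 * original source): on a system with 4K pages, a cache created with a
 * block_size of 8192 gets cache->pages = 8192 >> PAGE_CACHE_SHIFT = 2,
 * i.e. each entry is backed by two kmalloced PAGE_CACHE_SIZE buffers.
 */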


/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If there aren't length bytes available
 * then copy the number of bytes available.  In all cases return the number
 * of bytes copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
		int offset, int length)
{
	int remaining = length;

	if (length == 0)
		return 0;
	else if (buffer == NULL)
		return min(length, entry->length - offset);

	while (offset < entry->length) {
		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
				+ (offset % PAGE_CACHE_SIZE);
		int bytes = min_t(int, entry->length - offset,
				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));

		if (bytes >= remaining) {
			memcpy(buffer, buff, remaining);
			remaining = 0;
			break;
		}

		memcpy(buffer, buff, bytes);
		buffer += bytes;
		remaining -= bytes;
		offset += bytes;
	}

	return length - remaining;
}
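
/*
 * Worked example (an illustration, assuming PAGE_CACHE_SIZE == 4096): a
 * copy starting at offset 6000 reads from entry->data[1] + 1904, and at
 * most 4096 - 1904 = 2192 bytes come from that buffer before the loop
 * moves on to entry->data[2].
 */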


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
		u64 *block, int *offset, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int bytes, res, copied = length;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

	while (length) {
		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
		if (entry->error) {
			res = entry->error;
			goto error;
		} else if (*offset >= entry->length) {
			res = -EIO;
			goto error;
		}

		bytes = squashfs_copy_data(buffer, entry, *offset, length);
		if (buffer)
			buffer += bytes;
		length -= bytes;
		*offset += bytes;

		if (*offset == entry->length) {
			*block = entry->next_index;
			*offset = 0;
		}

		squashfs_cache_put(entry);
	}

	return copied;

error:
	/* Release the entry so its refcount isn't leaked on the error path */
	squashfs_cache_put(entry);
	return res;
}
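
/*
 * Illustrative caller pattern (a sketch; the variable names are
 * hypothetical, not taken from this file): the <*block, *offset> cursor is
 * advanced by each call, so consecutive metadata structures can be read
 * back to back, e.g.
 *
 *	err = squashfs_read_metadata(sb, &inode_data, &block, &offset,
 *					sizeof(inode_data));
 *	if (err < 0)
 *		return err;
 */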


/*
 * Look up in the fragment cache the fragment located at <start_block> in the
 * filesystem.  If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
		length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
	int length)
{
	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int i, res;
	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
		data[i] = buffer;
	res = squashfs_read_data(sb, data, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
	kfree(data);
	return res;
}