// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/bio.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

/*
 * Copy the data in the bio into the pages held by the page actor,
 * returning the number of bytes copied.
 */
static int copy_bio_to_actor(struct bio *bio,
                             struct squashfs_page_actor *actor,
                             int offset, int req_length)
{
        void *actor_addr;
        struct bvec_iter_all iter_all = {};
        struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
        int copied_bytes = 0;
        int actor_offset = 0;

        squashfs_actor_nobuff(actor);
        actor_addr = squashfs_first_page(actor);

        if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
                return 0;

        while (copied_bytes < req_length) {
                int bytes_to_copy = min_t(int, bvec->bv_len - offset,
                                          PAGE_SIZE - actor_offset);

                bytes_to_copy = min_t(int, bytes_to_copy,
                                      req_length - copied_bytes);
                if (!IS_ERR(actor_addr))
                        memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
                                        offset, bytes_to_copy);

                actor_offset += bytes_to_copy;
                copied_bytes += bytes_to_copy;
                offset += bytes_to_copy;

                if (actor_offset >= PAGE_SIZE) {
                        actor_addr = squashfs_next_page(actor);
                        if (!actor_addr)
                                break;
                        actor_offset = 0;
                }
                if (offset >= bvec->bv_len) {
                        if (!bio_next_segment(bio, &iter_all))
                                break;
                        offset = 0;
                }
        }
        squashfs_finish_page(actor);
        return copied_bytes;
}

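/*
 * Read the parts of @fullbio that are not already present in the page cache
 * and wait for the I/O to complete.  Runs of uncached pages are read using
 * trimmed clones of @fullbio; pages belonging to @cache_mapping are skipped.
 * On success, a head and/or tail page that was only partially requested is
 * added to @cache_mapping so that reads of adjacent blocks can reuse it.
 */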
static int squashfs_bio_read_cached(struct bio *fullbio,
                struct address_space *cache_mapping, u64 index, int length,
                u64 read_start, u64 read_end, int page_count)
{
        struct page *head_to_cache = NULL, *tail_to_cache = NULL;
        struct block_device *bdev = fullbio->bi_bdev;
        int start_idx = 0, end_idx = 0;
        struct bvec_iter_all iter_all;
        struct bio *bio = NULL;
        struct bio_vec *bv;
        int idx = 0;
        int err = 0;

        bio_for_each_segment_all(bv, fullbio, iter_all) {
                struct page *page = bv->bv_page;

                if (page->mapping == cache_mapping) {
                        idx++;
                        continue;
                }

                /*
                 * We only use this when the device block size is the same as
                 * the page size, so read_start and read_end cover full pages.
                 *
                 * Compare these to the original required index and length to
                 * only cache pages which were requested partially, since these
                 * are the ones which are likely to be needed when reading
                 * adjacent blocks.
                 */
                if (idx == 0 && index != read_start)
                        head_to_cache = page;
                else if (idx == page_count - 1 && index + length != read_end)
                        tail_to_cache = page;

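                /*
                 * Start a new cloned bio whenever the run of uncached pages
                 * is broken.  The previous clone, if any, is trimmed to its
                 * page range and chained so that the final submit_bio_wait()
                 * below waits for it as well.
                 */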
                if (!bio || idx != end_idx) {
                        struct bio *new = bio_alloc_clone(bdev, fullbio,
                                                          GFP_NOIO, &fs_bio_set);

                        if (bio) {
                                bio_trim(bio, start_idx * PAGE_SECTORS,
                                         (end_idx - start_idx) * PAGE_SECTORS);
                                bio_chain(bio, new);
                                submit_bio(bio);
                        }

                        bio = new;
                        start_idx = idx;
                }

                idx++;
                end_idx = idx;
        }

        if (bio) {
                bio_trim(bio, start_idx * PAGE_SECTORS,
                         (end_idx - start_idx) * PAGE_SECTORS);
                err = submit_bio_wait(bio);
                bio_put(bio);
        }

        if (err)
                return err;

        if (head_to_cache) {
                int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
                                                read_start >> PAGE_SHIFT,
                                                GFP_NOIO);

                if (!ret) {
                        SetPageUptodate(head_to_cache);
                        unlock_page(head_to_cache);
                }

        }

        if (tail_to_cache) {
                int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
                                                (read_end >> PAGE_SHIFT) - 1,
                                                GFP_NOIO);

                if (!ret) {
                        SetPageUptodate(tail_to_cache);
                        unlock_page(tail_to_cache);
                }
        }

        return 0;
}

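/*
 * Build and submit a read bio covering the device blocks that span
 * [index, index + length) and wait for it to complete.  When a cache
 * mapping is configured, pages already cached by earlier partial reads are
 * reused and the remaining pages are read via squashfs_bio_read_cached().
 * On success *biop holds the bio (the caller is responsible for freeing its
 * pages and the bio itself) and *block_offset is the offset of @index
 * within the first device block.
 */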
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
                             struct bio **biop, int *block_offset)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        struct address_space *cache_mapping = msblk->cache_mapping;
        const u64 read_start = round_down(index, msblk->devblksize);
        const sector_t block = read_start >> msblk->devblksize_log2;
        const u64 read_end = round_up(index + length, msblk->devblksize);
        const sector_t block_end = read_end >> msblk->devblksize_log2;
        int offset = read_start - round_down(index, PAGE_SIZE);
        int total_len = (block_end - block) << msblk->devblksize_log2;
        const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
        int error, i;
        struct bio *bio;

        bio = bio_kmalloc(page_count, GFP_NOIO);
        if (!bio)
                return -ENOMEM;
        bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
        bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);

        for (i = 0; i < page_count; ++i) {
                unsigned int len =
                        min_t(unsigned int, PAGE_SIZE - offset, total_len);
                struct page *page = NULL;

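                /*
                 * Prefer a page left in the cache by a previous partially
                 * requested block read; otherwise allocate a fresh page.
                 */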
                if (cache_mapping)
                        page = find_get_page(cache_mapping,
                                             (read_start >> PAGE_SHIFT) + i);
                if (!page)
                        page = alloc_page(GFP_NOIO);

                if (!page) {
                        error = -ENOMEM;
                        goto out_free_bio;
                }

                /*
                 * Use the __ version to avoid merging since we need each page
                 * to be separate when we check for and avoid cached pages.
                 */
                __bio_add_page(bio, page, len, offset);
                offset = 0;
                total_len -= len;
        }

        if (cache_mapping)
                error = squashfs_bio_read_cached(bio, cache_mapping, index,
                                                 length, read_start, read_end,
                                                 page_count);
        else
                error = submit_bio_wait(bio);
        if (error)
                goto out_free_bio;

        *biop = bio;
        *block_offset = index & ((1 << msblk->devblksize_log2) - 1);
        return 0;

out_free_bio:
        bio_free_pages(bio);
        bio_uninit(bio);
        kfree(bio);
        return error;
}

/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
int squashfs_read_data(struct super_block *sb, u64 index, int length,
                       u64 *next_index, struct squashfs_page_actor *output)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        struct bio *bio = NULL;
        int compressed;
        int res;
        int offset;

        if (length) {
                /*
                 * Datablock.
                 */
                compressed = SQUASHFS_COMPRESSED_BLOCK(length);
                length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
                TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
                        index, compressed ? "" : "un", length, output->length);
        } else {
                /*
                 * Metadata block.
                 */
                const u8 *data;
                struct bvec_iter_all iter_all = {};
                struct bio_vec *bvec = bvec_init_iter_all(&iter_all);

                if (index + 2 > msblk->bytes_used) {
                        res = -EIO;
                        goto out;
                }
                res = squashfs_bio_read(sb, index, 2, &bio, &offset);
                if (res)
                        goto out;

                if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
                        res = -EIO;
                        goto out_free_bio;
                }
                /* Extract the length of the metadata block */
                data = bvec_virt(bvec);
                length = data[offset];
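                /*
                 * The two length bytes are little-endian and may straddle a
                 * segment boundary, in which case the high byte is read from
                 * the next bio segment.
                 */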
                if (offset < bvec->bv_len - 1) {
                        length |= data[offset + 1] << 8;
                } else {
                        if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
                                res = -EIO;
                                goto out_free_bio;
                        }
                        data = bvec_virt(bvec);
                        length |= data[0] << 8;
                }
                bio_free_pages(bio);
                bio_uninit(bio);
                kfree(bio);

                compressed = SQUASHFS_COMPRESSED(length);
                length = SQUASHFS_COMPRESSED_SIZE(length);
                index += 2;

                TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
                      compressed ? "" : "un", length);
        }
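        /*
         * Sanity check the block length against the output buffer size and
         * the size of the filesystem before reading the full block.
         */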
        if (length < 0 || length > output->length ||
                        (index + length) > msblk->bytes_used) {
                res = -EIO;
                goto out;
        }

        if (next_index)
                *next_index = index + length;

        res = squashfs_bio_read(sb, index, length, &bio, &offset);
        if (res)
                goto out;

        if (compressed) {
                if (!msblk->stream) {
                        res = -EIO;
                        goto out_free_bio;
                }
                res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
        } else {
                res = copy_bio_to_actor(bio, output, offset, length);
        }

out_free_bio:
        bio_free_pages(bio);
        bio_uninit(bio);
        kfree(bio);
out:
        if (res < 0) {
                ERROR("Failed to read block 0x%llx: %d\n", index, res);
                if (msblk->panic_on_errors)
                        panic("squashfs read failed");
        }

        return res;
}