erofs: don't trigger WARN() when decompression fails
[platform/kernel/linux-rpi.git] / fs / erofs / decompressor.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2019 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  */
6 #include "compress.h"
7 #include <linux/module.h>
8 #include <linux/lz4.h>
9
10 #ifndef LZ4_DISTANCE_MAX        /* history window size */
11 #define LZ4_DISTANCE_MAX 65535  /* set to maximum value by default */
12 #endif
13
14 #define LZ4_MAX_DISTANCE_PAGES  (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
15 #ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
16 #define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
17 #endif
18
struct z_erofs_decompressor {
	/*
	 * If destpages contain sparse (NULL) entries, fill them with bounce
	 * pages.  It also checks whether destpages form continuous physical
	 * memory (so decompression can write to it directly).
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	/* decompress rq->in into the virtually-contiguous buffer @out */
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	/* human-readable algorithm name for diagnostics */
	char *name;
};
29
30 int z_erofs_load_lz4_config(struct super_block *sb,
31                             struct erofs_super_block *dsb,
32                             struct z_erofs_lz4_cfgs *lz4, int size)
33 {
34         struct erofs_sb_info *sbi = EROFS_SB(sb);
35         u16 distance;
36
37         if (lz4) {
38                 if (size < sizeof(struct z_erofs_lz4_cfgs)) {
39                         erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
40                         return -EINVAL;
41                 }
42                 distance = le16_to_cpu(lz4->max_distance);
43
44                 sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
45                 if (!sbi->lz4.max_pclusterblks) {
46                         sbi->lz4.max_pclusterblks = 1;  /* reserved case */
47                 } else if (sbi->lz4.max_pclusterblks >
48                            Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
49                         erofs_err(sb, "too large lz4 pclusterblks %u",
50                                   sbi->lz4.max_pclusterblks);
51                         return -EINVAL;
52                 } else if (sbi->lz4.max_pclusterblks >= 2) {
53                         erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
54                 }
55         } else {
56                 distance = le16_to_cpu(dsb->u1.lz4_max_distance);
57                 sbi->lz4.max_pclusterblks = 1;
58         }
59
60         sbi->lz4.max_distance_pages = distance ?
61                                         DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
62                                         LZ4_MAX_DISTANCE_PAGES;
63         return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
64 }
65
/*
 * Fill NULL slots in rq->out[] with bounce pages so that LZ4 back-references
 * never read unmapped memory, reusing earlier bounce pages once they fall
 * outside the LZ4 history window (max_distance_pages).
 *
 * Returns 1 if all output pages turned out to be physically contiguous
 * (decompression can then target page_address() directly), 0 otherwise.
 */
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	/* number of output pages covering pageofs_out + outputsize */
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	/* stack of bounce pages that have slid out of the history window */
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	/* bitmap over window slots: set when the slot holds a bounce page */
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	/* non-NULL while pages seen so far are physically contiguous */
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	/* i walks rq->out[]; j is i modulo the window size */
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* page one window back is now reusable as a bounce */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				/* still contiguous with the previous page? */
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		/* a hole breaks physical contiguity; plug it below */
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			/* reuse a bounce page that left the window */
			victim = availables[--top];
			get_page(victim);
		} else {
			/* __GFP_NOFAIL: allocation here must not fail */
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
121
/*
 * Map the compressed input for decompression, choosing one of three
 * strategies and reporting it via *maptype:
 *   0 - single input page, already kmapped as @inpage;
 *   1 - multiple input pages vmapped contiguously;
 *   2 - input copied into a per-CPU buffer (needed when in-place I/O
 *       would let decompression overwrite not-yet-consumed input).
 *
 * @inpage is the kmapped first input page; on every path except
 * maptype 0 it is kunmapped before returning.  *inputmargin is the
 * offset of real data in the first page and is reset to 0 on the copy
 * path (the copy strips it).  Returns the mapped source or ERR_PTR().
 */
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		/*
		 * in-place decompression is only safe with 0padding, full
		 * decoding, and enough trailing margin in the last page
		 */
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		/*
		 * copy if any input page would be overwritten by output
		 * before LZ4 has consumed it (input sits at the tail of
		 * the output page array when decompressing in place)
		 */
		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		/* keep the existing atomic mapping of the only page */
		*maptype = 0;
		return inpage;
	}
	/* leaving atomic context: vmap may sleep */
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	/* gather the (possibly offset) input pages into one linear buffer */
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		/* only the first page carries a margin */
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
191
/*
 * LZ4-decompress rq->in into the virtually-contiguous buffer @out.
 *
 * Skips the leading zero padding of the first input page when the
 * 0padding feature is enabled, maps the input via
 * z_erofs_handle_inplace_io() and unmaps it according to the returned
 * maptype.  Returns 0 on success or a negative errno; on a corrupted
 * stream the produced bytes are kept and the remainder zeroed before
 * returning -EIO.
 */
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		/* skip leading zeroes, stopping at a page boundary */
		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		/* all-zero input: corrupted on-disk data */
		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	/* account for the skipped padding before mapping the input */
	rq->inputsize -= inputmargin;
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	/* on error, headpage has already been kunmapped by the helper */
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		/* zero the tail so callers never see stale output data */
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	/* unmap the source according to how it was mapped (see helper) */
	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
258
/* dispatch table indexed by Z_EROFS_COMPRESSION_* algorithm id */
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		/* no handlers: handled by z_erofs_shifted_transform() */
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};
269
270 static void copy_from_pcpubuf(struct page **out, const char *dst,
271                               unsigned short pageofs_out,
272                               unsigned int outputsize)
273 {
274         const char *end = dst + outputsize;
275         const unsigned int righthalf = PAGE_SIZE - pageofs_out;
276         const char *cur = dst - pageofs_out;
277
278         while (cur < end) {
279                 struct page *const page = *out++;
280
281                 if (page) {
282                         char *buf = kmap_atomic(page);
283
284                         if (cur >= dst) {
285                                 memcpy(buf, cur, min_t(uint, PAGE_SIZE,
286                                                        end - cur));
287                         } else {
288                                 memcpy(buf + pageofs_out, cur + pageofs_out,
289                                        min_t(uint, righthalf, end - cur));
290                         }
291                         kunmap_atomic(buf);
292                 }
293                 cur += PAGE_SIZE;
294         }
295 }
296
297 static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
298                                       struct list_head *pagepool)
299 {
300         const unsigned int nrpages_out =
301                 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
302         const struct z_erofs_decompressor *alg = decompressors + rq->alg;
303         unsigned int dst_maptype;
304         void *dst;
305         int ret;
306
307         /* two optimized fast paths only for non bigpcluster cases yet */
308         if (rq->inputsize <= PAGE_SIZE) {
309                 if (nrpages_out == 1 && !rq->inplace_io) {
310                         DBG_BUGON(!*rq->out);
311                         dst = kmap_atomic(*rq->out);
312                         dst_maptype = 0;
313                         goto dstmap_out;
314                 }
315
316                 /*
317                  * For the case of small output size (especially much less
318                  * than PAGE_SIZE), memcpy the decompressed data rather than
319                  * compressed data is preferred.
320                  */
321                 if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
322                         dst = erofs_get_pcpubuf(1);
323                         if (IS_ERR(dst))
324                                 return PTR_ERR(dst);
325
326                         rq->inplace_io = false;
327                         ret = alg->decompress(rq, dst);
328                         if (!ret)
329                                 copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
330                                                   rq->outputsize);
331
332                         erofs_put_pcpubuf(dst);
333                         return ret;
334                 }
335         }
336
337         /* general decoding path which can be used for all cases */
338         ret = alg->prepare_destpages(rq, pagepool);
339         if (ret < 0)
340                 return ret;
341         if (ret) {
342                 dst = page_address(*rq->out);
343                 dst_maptype = 1;
344                 goto dstmap_out;
345         }
346
347         dst = erofs_vm_map_ram(rq->out, nrpages_out);
348         if (!dst)
349                 return -ENOMEM;
350         dst_maptype = 2;
351
352 dstmap_out:
353         ret = alg->decompress(rq, dst + rq->pageofs_out);
354
355         if (!dst_maptype)
356                 kunmap_atomic(dst);
357         else if (dst_maptype == 2)
358                 vm_unmap_ram(dst, nrpages_out);
359         return ret;
360 }
361
/*
 * Handle the "shifted" (uncompressed-but-offset) layout: the single
 * input page holds the plain data, which only needs to be copied to
 * the one or two output pages at offset rq->pageofs_out.
 *
 * Returns 0 on success or -EIO if the request shape is invalid.
 */
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	/* shifted data can span at most two output pages */
	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* output page is the input page itself: nothing to move */
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/*
			 * second output page is the input page itself:
			 * shift within it (must happen after the out[0]
			 * copy above, which reads the unshifted data)
			 */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
400
401 int z_erofs_decompress(struct z_erofs_decompress_req *rq,
402                        struct list_head *pagepool)
403 {
404         if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
405                 return z_erofs_shifted_transform(rq, pagepool);
406         return z_erofs_decompress_generic(rq, pagepool);
407 }