// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
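
/*
 * Decompression requests enter through z_erofs_decompress() below: "shifted"
 * (uncompressed) pclusters are handled directly, while real algorithms such
 * as LZ4 go through z_erofs_decompress_generic() and the per-algorithm hooks
 * declared in struct z_erofs_decompressor.
 */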
struct z_erofs_decompressor {
	/*
	 * if destpages have sparse pages, fill them with bounce pages.
	 * it also checks whether destpages indicate continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}
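
/*
 * Fill sparse (NULL) slots of rq->out with short-lived bounce pages so that
 * LZ4 back-references within the match window always land on mapped memory.
 * Bounce pages that slide out of the lz4.max_distance_pages window are
 * recycled before new ones are allocated.  Returns 1 if all output pages are
 * physically contiguous lowmem (so page_address(*rq->out) can be used
 * directly), 0 otherwise.
 */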
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;
		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}
		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
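
/*
 * Map the compressed input linearly and report the mapping type via
 * *maptype: 0 - the single kmapped head page is reused as-is, 1 - input
 * pages are mapped contiguously with erofs_vm_map_ram(), 2 - the data is
 * copied into a per-CPU buffer because decompressing in place would be
 * unsafe (input/output pages overlap or the tail margin is too small).
 */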
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;
	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;
		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}
	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* or copy compressed data, which may overlap the output, to a per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}
	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
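
/*
 * With the 0PADDING feature, the compressed stream is aligned to the end of
 * the pcluster and the leading bytes are zero; skip them to find the actual
 * input margin before running the LZ4 decompressor.
 */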
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression in-place is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* the legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};
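
/*
 * Copy decompressed data out of the per-CPU buffer into the (possibly
 * sparse) output pages, honouring the byte offset of the first page.
 */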
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst)
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			else
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* two optimized fast paths, only for the non-bigpcluster cases yet */
	if (rq->inputsize <= PAGE_SIZE) {
		if (nrpages_out == 1 && !rq->inplace_io) {
			DBG_BUGON(!*rq->out);
			dst = kmap_atomic(*rq->out);
			dst_maptype = 0;
			goto dstmap_out;
		}
		/*
		 * For small output sizes (especially much less than
		 * PAGE_SIZE), it is preferable to memcpy the decompressed
		 * data rather than the compressed data.
		 */
		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
			dst = erofs_get_pcpubuf(1);
			if (IS_ERR(dst))
				return PTR_ERR(dst);
			rq->inplace_io = false;
			ret = alg->decompress(rq, dst);
			if (!ret)
				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
						  rq->outputsize);
			erofs_put_pcpubuf(dst);
			return ret;
		}
	}
	/* general decoding path which can be used for all cases */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}
	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}
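
/*
 * "Shifted" pclusters carry plain (uncompressed) data: decoding is just a
 * copy of at most two output pages, with the head page shifted by
 * rq->pageofs_out.
 */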
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}
	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}
	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
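
/* Dispatch a decompression request to the matching backend. */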
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}