// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/moduleparam.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
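
/*
 * Page-pointer arrays sized for one cluster are served from a per-sb slab
 * (sbi->page_array_slab); anything larger falls back to f2fs_kzalloc().
 * page_array_free() below mirrors the same split.
 */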
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return f2fs_kmem_cache_alloc(sbi->page_array_slab,
					GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
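
/*
 * A compressed page is recognized by its page_private() pointing at a
 * context object whose first field is F2FS_COMPRESSED_PAGE_MAGIC (both
 * compress_io_ctx and decompress_io_ctx begin with that magic).
 */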
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}
static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
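
/*
 * Unlike lzo, which gets a worst-case output buffer, the lz4 and zstd
 * backends cap the output budget at one page less than the raw cluster
 * (minus the compress header): if compression cannot save at least one
 * block, they return -EAGAIN and the cluster is written out uncompressed.
 */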
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * Do not raise cc->clen to LZ4_compressBound(inputsize) for the
	 * worst case: the lz4 compressor handles a limited output budget
	 * properly on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif
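
/*
 * The zstd backend drives the kernel's streaming API with a caller-provided
 * workspace: the workspace is sized up front via ZSTD_CStreamWorkspaceBound()
 * / ZSTD_DStreamWorkspaceBound(), so no allocation happens inside the
 * (de)compression call itself.
 */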
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
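
/*
 * Pages holding compressed output are drawn from a mempool rather than the
 * page allocator, so the writeback path can keep making forward progress
 * under memory pressure. The pool size is a read-only module parameter.
 */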
static mempool_t *compress_page_pool;
static unsigned int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}
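
/*
 * vm_map_ram() can fail transiently when vmap space is fragmented; retry a
 * few times, purging stale aliases with vm_unmap_aliases() between attempts.
 */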
#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
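
/*
 * Compress one cluster: map the raw pages (rpages) and freshly allocated
 * compressed pages (cpages) into contiguous buffers, run the backend, then
 * fill the compress header (clen, optional checksum, reserved words) and
 * trim the now-unneeded tail cpages down to the compressed size.
 */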
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
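
/*
 * Decompress one cluster in place. tpages[] provides the decompression
 * target: pagecache pages where the reader supplied them (rpages), and
 * temporary mempool pages for any holes in the cluster.
 */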
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
}
/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}
bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
				int index, int nr_pages)
{
	unsigned long pgidx;
	int i;

	if (nr_pages - index < cc->cluster_size)
		return false;

	pgidx = pvec->pages[index]->index;

	for (i = 1; i < cc->cluster_size; i++) {
		if (pvec->pages[index + i]->index != pgidx + i)
			return false;
	}

	return true;
}
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned int nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
	int cluster_end = 0;
	int i;
	char *reason = "";

	if (!compressed)
		return false;

	/* [..., COMPR_ADDR, ...] */
	if (dn->ofs_in_node % cluster_size) {
		reason = "[*|C|*|*]";
		goto out;
	}

	for (i = 1; i < cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
							dn->ofs_in_node + i);

		/* [COMPR_ADDR, ..., COMPR_ADDR] */
		if (blkaddr == COMPRESS_ADDR) {
			reason = "[C|*|C|*]";
			goto out;
		}

		if (!__is_valid_data_blkaddr(blkaddr)) {
			if (!cluster_end)
				cluster_end = i;
			continue;
		}

		/* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
		if (cluster_end) {
			reason = "[C|N|N|V]";
			goto out;
		}
	}
	return false;
out:
	f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
			dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	return true;
}
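
/*
 * Count blocks in one cluster, starting from the COMPRESS_ADDR header slot.
 * With compr == true only valid (compressed) data blocks are counted;
 * otherwise any block that is not NULL_ADDR counts. Returns 0 for a
 * non-compressed cluster and a negative errno on failure.
 */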
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (f2fs_sanity_check_cluster(&dn)) {
		ret = -EFSCORRUPTED;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}
static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = (loff_t)rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
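
/*
 * Write one compressed cluster: the first block of the cluster is tagged
 * COMPRESS_ADDR as the header, the next nr_cpages blocks carry the
 * compressed payload, and any remaining blocks are released (NEW_ADDR).
 * On failure this returns -EAGAIN and the caller falls back to raw writes.
 */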
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}
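
/*
 * Writeback completion for one compressed page. The shared compress_io_ctx
 * is torn down, and the raw pages finish writeback, only after the last
 * in-flight compressed page of the cluster completes.
 */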
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			add_compr_block_stat(cc->inode, cc->cluster_size);
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}
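
/*
 * A decompress_io_ctx is refcounted: the initial reference is held for I/O
 * completion and dropped via __f2fs_decompress_end_io() once the cluster's
 * pages have been updated, while remaining_pages counts compressed pages
 * still in flight; the last completed read triggers decompression.
 */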
static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
					false, F2FS_I_SB(cc->inode));
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}
/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}
static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}
/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}
/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}
/*
 * check whether cluster blocks are contiguous, and add extent cache entry
 * only if cluster blocks are logically and physically contiguous.
 */
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
{
	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
	int i = compressed ? 1 : 0;
	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			break;
		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
			return 0;
	}

	return compressed ? i - 1 : i;
}
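
/*
 * With the "compress_cache" mount option, compressed blocks that have just
 * been read are mirrored into a dedicated internal inode's page cache,
 * indexed by block address, so a later read of the same cluster can skip
 * the device (see f2fs_load_compressed_page()).
 */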
const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}
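
/*
 * The page-pointer-array slab is per superblock and named after the backing
 * device's major:minor numbers, so concurrently mounted f2fs instances each
 * get a distinct, identifiable slab cache.
 */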
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;

	return 0;
}
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return err;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}