/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS	3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated in atomic for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

#define Z_EROFS_PCLUSTER_FULL_LENGTH	0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT	1

/*
 * let's leave a type here in case of introducing
 * another tagged pointer later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and if full length or not */
	unsigned int length;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or inplaced pages) */
	struct page *compressed_pages[];
};

#define z_erofs_primarycollection(pcluster)	(&(pcluster)->primary_collection)

/* let's avoid the valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O yet (still open) */
#define Z_EROFS_PCLUSTER_TAIL		((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED	((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL		(NULL)

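/*
 * Taken together, pcl->next forms a singly-linked pcluster chain with
 * three sentinel values: Z_EROFS_PCLUSTER_NIL (not chained yet),
 * Z_EROFS_PCLUSTER_TAIL (tail of an open chain whose I/O hasn't been
 * submitted and which can therefore still be extended), and
 * Z_EROFS_PCLUSTER_TAIL_CLOSED (tail of a chain whose I/O has already
 * been submitted).  Both TAIL markers are deliberately chosen to fall
 * outside the range of valid kernel addresses.
 */
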
struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};

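/*
 * Tailpacking inline pclusters keep their compressed data at pageofs_in
 * within the inode metadata rather than in separate physical blocks, so
 * obj.index stays zero and only a single compressed page slot is
 * reserved for them, as z_erofs_pclusterpages() below reflects.
 */
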
static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	if (z_erofs_is_inline_pcluster(pcl))
		return 1;
	return pcl->pclusterpages;
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS	2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK	((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT	(Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # to unlock the page
 * sub-index: 0 - for partial page, >= 1 full page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;

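/*
 * For example, an atomic value of ((3 << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | 2)
 * describes an online page whose decompressed sub-index is 3 and which
 * still has two outstanding pieces keeping it locked; each
 * z_erofs_onlinepage_endio() call drops the low counter, and the page is
 * only unlocked once that counter reaches zero.
 */
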
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

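/*
 * Note that the cmpxchg loop above records the sub-index bits and, when
 * @down is true, bumps the low reference count in a single atomic step;
 * each such bump is paired with a later z_erofs_onlinepage_endio() call
 * that drops it again.
 */
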
static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048

#endif