// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"
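
/*
 * A direct bmap keeps its block pointers inline in the bmap union
 * (b_u.u_data): a small header (struct nilfs_direct_node) followed by
 * NILFS_DIRECT_NBLOCKS little-endian 64-bit pointer slots, one per file
 * block offset.  The helpers below read and update those slots.
 */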
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}
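
/*
 * nilfs_direct_lookup - return the block pointer stored at @key
 *
 * Returns 0 and sets *@ptrp on success, or -ENOENT if @key is out of
 * range, @level is not 1, or no pointer is stored in the slot.
 */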
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}
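
/*
 * nilfs_direct_lookup_contig - look up a run of consecutive blocks
 *
 * Starting at @key, pointers are translated through the DAT when the
 * bmap uses virtual block numbers, and the scan stops at the first hole
 * or discontiguous block.  Returns the number of contiguous blocks found
 * (at least 1) with *@ptrp set to the first one, or a negative error code.
 */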
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			return ret;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
	     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
	     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				return ret;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;
}
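
/*
 * Pick an allocation hint for a new virtual block number: prefer the
 * pointer recorded for sequential access, otherwise fall back to the
 * block-group heuristic of the bmap layer.
 */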
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}
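
/*
 * nilfs_direct_insert - map @key to the block carried by @ptr
 *
 * @ptr actually carries the buffer head of the new block.  For bmaps
 * using virtual block numbers a DAT entry is allocated and the buffer is
 * marked volatile until a real disk address is assigned at segment
 * construction time.
 */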
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}
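
/*
 * nilfs_direct_delete - unmap @key and release its DAT entry, if any
 *
 * Returns -ENOENT when no block is mapped at @key.
 */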
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}
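
/* Find the first mapped key at or after @start. */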
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}
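
/* Report the largest mapped key, or -ENOENT if the mapping is empty. */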
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;
	return 0;
}
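
/*
 * A nonzero return tells the bmap layer that @key does not fit in a
 * direct mapping and a conversion to a B-tree is needed before insertion.
 */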
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}
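
/*
 * Collect up to @nitems existing key/pointer pairs into @keys and @ptrs
 * and return how many were gathered; used when the mapping is converted
 * to a B-tree.
 */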
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}
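
/*
 * nilfs_direct_delete_and_convert - shrink a bmap back to a direct mapping
 *
 * Deletes @key using the current (pre-conversion) operations, clears the
 * old representation, and rebuilds the inline pointer array from the @n
 * surviving key/pointer pairs in @keys/@ptrs.
 */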
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}
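
/*
 * nilfs_direct_propagate - propagate dirtiness of the data buffer @bh
 *
 * For bmaps using virtual block numbers, a buffer dirtied for the first
 * time gets its DAT entry updated (possibly under a new virtual block
 * number); an already volatile buffer only needs the entry marked dirty.
 */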
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}
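
/*
 * Finalize a block for writing when virtual block numbers are in use:
 * record the start of the block's lifetime in the DAT and fill in binfo
 * of the bi_v flavor for the segment summary.
 */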
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}
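
/*
 * Counterpart of nilfs_direct_assign_v() for bmaps that store physical
 * block numbers directly: record @blocknr in the slot and fill in binfo
 * of the bi_dat flavor.
 */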
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}
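
/*
 * nilfs_direct_assign - assign the real disk address of a buffer
 *
 * Called during segment construction to bind the block of *@bh to its
 * final location @blocknr and to fill in @binfo for the segment summary.
 */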
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu", __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu", __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}
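
/*
 * Operation vector shared with the bmap layer; entries left NULL are
 * optional hooks a flat mapping does not need.
 */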
static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_seek_key		=	nilfs_direct_seek_key,
	.bop_last_key		=	nilfs_direct_last_key,

	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};
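
/* Set up @bmap as a direct mapping by installing the operation vector. */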
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}