/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */

static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        BUG_ON(!current->journal_info);

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * back to its in-place disk block, remove it from the AIL.
         */
        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
        spin_unlock(&sdp->sd_ail_lock);
        get_bh(bh);
        atomic_inc(&sdp->sd_log_pinned);
        trace_gfs2_pin(bd, 1);
}

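/*
 * Note on the pin/unpin lifecycle: while a buffer is pinned it is held
 * in memory (see the get_bh() above) and excluded from normal writeback
 * until the log write which protects it has reached the journal. Once
 * that has happened, gfs2_unpin() below marks the buffer dirty again
 * and places it on the AIL, from where it is written back to its
 * in-place location.
 */
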
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
        return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_rgrpd *rgd = gl->gl_object;
        unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
        struct gfs2_bitmap *bi = rgd->rd_bits + index;

        if (bi->bi_clone == 0)
                return;
        if (sdp->sd_args.ar_discard)
                gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
        memcpy(bi->bi_clone + bi->bi_offset,
               bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
        clear_bit(GBF_FULL, &bi->bi_flags);
        rgd->rd_free_clone = rgd->rd_free;
}

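/*
 * The rgrp bitmaps are cloned while they are journaled, so that blocks
 * freed in the current log generation are not handed out again before
 * the log commit has made it to disk. When the rgrp buffer is unpinned,
 * the clone is brought back into sync with the real bitmap above,
 * discards are optionally issued for the freed ranges, and the freed
 * space becomes allocatable again (rd_free_clone catches up with
 * rd_free).
 */
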
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL entry which will hold the buffer until it is written back
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        BUG_ON(!buffer_uptodate(bh));
        BUG_ON(!buffer_pinned(bh));

        lock_buffer(bh);
        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        if (buffer_is_rgrp(bd))
                maybe_release_space(bd);

        spin_lock(&sdp->sd_ail_lock);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        trace_gfs2_pin(bd, 0);
        unlock_buffer(bh);
        atomic_dec(&sdp->sd_log_pinned);
}

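/*
 * After gfs2_unpin() the buffer lives on two lists: ai_ail1_list, the
 * per-log-flush list of buffers still awaiting in-place writeback, and
 * the glock's gl_ail_list, which lets log flushes for that glock find
 * any outstanding buffers. Once the in-place write completes, the AIL
 * code moves the buffer from the ail1 list to the ail2 list, after
 * which it can be dropped (see gfs2_pin() above, which removes such
 * buffers from the AIL when they are journaled again).
 */
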
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
        BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
               (sdp->sd_log_flush_head != sdp->sd_log_head));

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
        unsigned int lbn = sdp->sd_log_flush_head;
        struct gfs2_journal_extent *je;
        u64 block;

        list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
                if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
                        block = je->dblock + lbn - je->lblock;
                        gfs2_log_incr_head(sdp);
                        return block;
                }
        }

        return -1;
}

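/*
 * The journal is mapped into a list of contiguous extents
 * (sd_jdesc->extent_list) when the filesystem is mounted, so
 * translating the current flush head into a physical block number is
 * a short list walk rather than a block map lookup on the journal
 * inode. The -1 return (as a u64) covers the case of a flush head
 * outside the journal, which should never happen.
 */
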
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
                                  int error)
{
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
        unsigned size;

        bh = page_buffers(page);
        size = bvec->bv_len;
        while (bh_offset(bh) < bvec->bv_offset)
                bh = bh->b_this_page;
        do {
                if (error)
                        set_buffer_write_io_error(bh);
                unlock_buffer(bh);
                next = bh->b_this_page;
                size -= bh->b_size;
                brelse(bh);
                bh = next;
        } while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */

static void gfs2_end_log_write(struct bio *bio, int error)
{
        struct gfs2_sbd *sdp = bio->bi_private;
        struct bio_vec *bvec;
        struct page *page;
        int i;

        if (error) {
                sdp->sd_log_error = error;
                fs_err(sdp, "Error %d writing to log\n", error);
        }

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
                else
                        mempool_free(page, gfs2_page_pool);
        }

        bio_put(bio);
        if (atomic_dec_and_test(&sdp->sd_log_in_flight))
                wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
        if (sdp->sd_log_bio) {
                atomic_inc(&sdp->sd_log_in_flight);
                submit_bio(rw, sdp->sd_log_bio);
                sdp->sd_log_bio = NULL;
        }
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * able to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct super_block *sb = sdp->sd_vfs;
        unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
        struct bio *bio;

        BUG_ON(sdp->sd_log_bio);

        while (1) {
                bio = bio_alloc(GFP_NOIO, nrvecs);
                if (likely(bio))
                        break;
                nrvecs = max(nrvecs/2, 1U);
        }

        bio->bi_sector = blkno * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio->bi_end_io = gfs2_end_log_write;
        bio->bi_private = sdp;

        sdp->sd_log_bio = bio;

        return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
        struct bio *bio = sdp->sd_log_bio;
        u64 nblk;

        if (bio) {
                nblk = bio->bi_sector + bio_sectors(bio);
                nblk >>= sdp->sd_fsb2bb_shift;
                if (blkno == nblk)
                        return bio;
                gfs2_log_flush_bio(sdp, WRITE);
        }

        return gfs2_log_alloc_bio(sdp, blkno);
}

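/*
 * Together, gfs2_log_get_bio() and gfs2_log_flush_bio() batch up
 * sequential log writes: as long as successive log blocks are also
 * adjacent on the device, they are merged into the single cached bio,
 * and submit_bio() is only called when the sequence breaks, the bio
 * fills up, or the log code forces a flush.
 */
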
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
                           unsigned size, unsigned offset)
{
        u64 blkno = gfs2_log_bmap(sdp);
        struct bio *bio;
        int ret;

        bio = gfs2_log_get_bio(sdp, blkno);
        ret = bio_add_page(bio, page, size, offset);
        if (ret == 0) {
                gfs2_log_flush_bio(sdp, WRITE);
                bio = gfs2_log_alloc_bio(sdp, blkno);
                ret = bio_add_page(bio, page, size, offset);
                WARN_ON(ret == 0);
        }
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
        struct super_block *sb = sdp->sd_vfs;
        gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
                                      u32 ld_length, u32 ld_data1)
{
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        struct gfs2_log_descriptor *ld = page_address(page);
        clear_page(ld);
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(ld_type);
        ld->ld_length = cpu_to_be32(ld_length);
        ld->ld_data1 = cpu_to_be32(ld_data1);
        ld->ld_data2 = 0;
        return page;
}

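/*
 * The resulting descriptor block, as consumed by the scan functions
 * during replay, looks like this on disk:
 *
 *      struct gfs2_log_descriptor      header (type, length, data1)
 *      __be64                          tags[]
 *
 * For metadata descriptors each tag is a single block number; for
 * jdata descriptors each tag is a pair: the block number followed by
 * a flag saying whether the block's contents had to be escaped.
 */
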
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_meta_header *mh;
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr))
                goto out;
        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_buf++;
        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        if (!list_empty(&le->le_list))
                goto out;
        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
        mh->__pad0 = cpu_to_be64(0);
        mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        sdp->sd_log_num_buf++;
        list_add(&le->le_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

static void gfs2_check_magic(struct buffer_head *bh)
{
        void *kaddr;
        __be32 *ptr;

        clear_buffer_escaped(bh);
        kaddr = kmap_atomic(bh->b_page);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                set_buffer_escaped(bh);
        kunmap_atomic(kaddr);
}

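/*
 * Escaping: a journaled data block which happens to begin with
 * GFS2_MAGIC could be mistaken for log metadata during replay. Such
 * blocks are flagged here, written to the log with their first four
 * bytes zeroed (see gfs2_before_commit() below), and have the magic
 * number restored from the descriptor flag during replay (see
 * databuf_lo_scan_elements()).
 */
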
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                               unsigned int total, struct list_head *blist,
                               bool is_databuf)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        struct page *page;
        unsigned int num;
        unsigned n;
        __be64 *ptr;

        gfs2_log_lock(sdp);
        bd1 = bd2 = list_prepare_entry(bd1, blist, bd_le.le_list);
        while(total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                page = gfs2_get_log_desc(sdp,
                                         is_databuf ? GFS2_LOG_DESC_JDATA :
                                         GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);

                n = 0;
                list_for_each_entry_continue(bd1, blist, bd_le.le_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (is_databuf) {
                                gfs2_check_magic(bd1->bd_bh);
                                *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
                        }
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                gfs2_log_write_page(sdp, page);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, blist, bd_le.le_list) {
                        get_bh(bd2->bd_bh);
                        gfs2_log_unlock(sdp);
                        lock_buffer(bd2->bd_bh);
                        if (buffer_escaped(bd2->bd_bh)) {
                                void *kaddr;
                                page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                                ptr = page_address(page);
                                kaddr = kmap_atomic(bd2->bd_bh->b_page);
                                memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
                                       bd2->bd_bh->b_size);
                                kunmap_atomic(kaddr);
                                *(__be32 *)ptr = 0;
                                clear_buffer_escaped(bd2->bd_bh);
                                unlock_buffer(bd2->bd_bh);
                                brelse(bd2->bd_bh);
                                gfs2_log_write_page(sdp, page);
                        } else {
                                gfs2_log_write_bh(sdp, bd2->bd_bh);
                        }
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
                           &sdp->sd_log_le_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_glock *gl = bd->bd_gl;
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;

        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct page *page;
        unsigned int length;

        if (!sdp->sd_log_num_revoke)
                return;

        length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
        page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
        ld = page_address(page);
        offset = sizeof(struct gfs2_log_descriptor);

        list_for_each_entry(bd, head, bd_le.le_list) {
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

                        gfs2_log_write_page(sdp, page);
                        page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
                        mh = page_address(page);
                        clear_page(mh);
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        gfs2_log_write_page(sdp, page);
}

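/*
 * A revoke records that a block has ceased to be journaled metadata
 * (for example, because it has been freed), so any earlier copies of
 * it still sitting in the log must not be replayed. The scan functions
 * below collect the revoke tags on pass 0, so that gfs2_revoke_check()
 * can filter those blocks out when the log body is replayed on pass 1.
 */
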
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_bufdata *bd;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                gl = bd->bd_gl;
                atomic_dec(&gl->gl_revokes);
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                kmem_cache_free(gfs2_bufdata_cachep, bd);
        }
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0) {
                                brelse(bh);
                                return error;
                        }
                        else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it is
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */

static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (tr) {
                if (!list_empty(&bd->bd_list_tr))
                        goto out;
                tr->tr_touched = 1;
                if (gfs2_is_jdata(ip)) {
                        tr->tr_num_buf++;
                        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
                }
        }
        if (!list_empty(&le->le_list))
                goto out;

        set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
        set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_databuf++;
                list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
        } else {
                list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
        }
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        unsigned int limit = buf_limit(sdp) / 2;

        gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
                           &sdp->sd_log_le_databuf, 1);
}

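/*
 * The limit is halved relative to buf_lo_before_commit() because each
 * jdata tag in the descriptor is two __be64s (block number plus escape
 * flag) rather than one, so only half as many fit into one descriptor
 * block.
 */
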
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        /* data sync? */
        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_after_commit = revoke_lo_after_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};