/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"

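/**
 * glock_lo_add - Add a glock to the transaction
 *
 * The glock must be held in the exclusive state. A reference is
 * taken and the glock is marked dirty and queued on the log's glock
 * list; the reference is dropped again in glock_lo_after_commit().
 */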
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

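/**
 * buf_lo_add - Add a metadata buffer to the transaction
 *
 * The buffer is pinned in memory until the transaction has been
 * committed to the on-disk log, and is also tracked on the
 * transaction's own list for the benefit of buf_lo_incore_commit().
 */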
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

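/**
 * buf_lo_before_commit - Write the pinned metadata buffers to the log
 *
 * The buffers are written in chunks of up to "limit" blocks, each
 * chunk preceded by a log descriptor listing the real block number of
 * every buffer in it. With the descriptor rounded up to an 8 byte
 * boundary, a 4k block holds (4096 - 72)/sizeof(__be64) = 503 block
 * numbers, hence the comment below.
 */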
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num, n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (num > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

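/*
 * Journal recovery is driven in numbered passes: pass 0 collects the
 * revokes, pass 1 replays the journaled blocks. Reset the replay
 * statistics before the first pass.
 */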
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

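/**
 * buf_lo_scan_elements - Replay journaled metadata blocks (pass 1)
 *
 * Each block number listed in a metadata log descriptor is copied
 * from its position in the log back to its home location, unless a
 * revoke collected in pass 0 shows that a newer transaction freed it.
 */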
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

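/**
 * revoke_lo_add - Add a revoke to the transaction
 *
 * A revoke records that a previously journaled block has since been
 * freed, so stale copies of it still sitting in the log must not be
 * replayed during recovery.
 */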
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

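/**
 * revoke_lo_before_commit - Write the revoke list into the log
 *
 * The revoked block numbers are packed as __be64s, first into the
 * tail of the log descriptor block and then, if they overflow it,
 * into continuation blocks carrying a GFS2_METATYPE_LB header.
 */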
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be16(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be16(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

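/**
 * revoke_lo_scan_elements - Collect revokes during recovery (pass 0)
 *
 * Walks every block of a revoke descriptor and feeds each block
 * number to gfs2_revoke_add() so that pass 1 can skip stale blocks.
 */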
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 0)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

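/**
 * rg_lo_add - Add a resource group to the transaction
 *
 * A hold is taken on the rgrp's bitmap buffers until the commit, at
 * which point rg_lo_after_commit() repolishes the clone bitmaps and
 * drops the hold again.
 */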
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks (for 4k blocks each tag is 16 bytes, and an 80 byte
 *    aligned descriptor leaves (4096 - 80)/16 = 251 tags), which
 *    isn't an enormous overhead but twice as much as for normal
 *    metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = get_transaction;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = get_v2ip(mapping->host);

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
	}

	gfs2_log_lock(sdp);
	if (ip->i_di.di_flags & GFS2_DIF_JDATA)
		sdp->sd_log_num_jdata++;
	sdp->sd_log_num_databuf++;
	list_add(&le->le_list, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}

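/*
 * A data block that happens to start with the GFS2 magic number could
 * be mistaken for metadata in the log, so such blocks are "escaped":
 * the magic number is zeroed before the block is written to the log
 * and restored from the tag when the block is replayed.
 */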
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/* printk(KERN_INFO "totals: jdata=%u dbuf=%u\n", total_jdata, total_dbuf); */
	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				get_bh(bd1->bd_bh);
				gfs2_log_unlock(sdp);
				if (buffer_dirty(bd1->bd_bh)) {
					wait_on_buffer(bd1->bd_bh);
					ll_rw_block(WRITE, 1, &bd1->bd_bh);
				}
				brelse(bd1->bd_bh);
				gfs2_log_lock(sdp);
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;

				gfs2_log_unlock(sdp);
				/* printk(KERN_INFO "journaled buffer\n"); */
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
					ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
					ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (++n >= num)
					break;
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		/* printk(KERN_INFO "totals2: jdata=%u dbuf=%u\n", total_jdata, total_dbuf); */
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			gfs2_log_unlock(sdp);
			/* copy buffer if it needs escaping */
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data, kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* printk(KERN_INFO "wait on ordered data buffers\n"); */
	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			set_v2bd(bh, NULL);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kfree(bd1);
	}

	/* printk(KERN_INFO "sd_log_num_databuf %u sd_log_num_jdata %u\n", sdp->sd_log_num_databuf, sdp->sd_log_num_jdata); */
	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp,
			 sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

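/**
 * databuf_lo_scan_elements - Replay journaled data blocks (pass 1)
 *
 * Like buf_lo_scan_elements(), except that each tag carries a second
 * __be64 which, when non-zero, marks an escaped block whose first
 * four bytes must be restored to the GFS2 magic number.
 */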
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

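/*
 * The per-type log operation tables. gfs2_log_ops[] is the NULL
 * terminated list that commit and recovery iterate over to drive
 * each element type in turn.
 */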
struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock"
};

struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf"
};

struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke"
};

struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg"
};

struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf"
};

struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL
};