/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

        clear_buffer_dirty(bh);
        if (test_set_buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);
        if (!buffer_uptodate(bh))
                gfs2_io_error_bh(sdp, bh);

        bd = bh->b_private;
        /* If this buffer is in the AIL and it has already been written
         * to the in-place disk block, remove it from the AIL.
         */
        if (bd->bd_ail)
                list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL to which the unpinned buffer is added
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd = bh->b_private;

        gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
        if (!buffer_pinned(bh))
                gfs2_assert_withdraw(sdp, 0);

        mark_buffer_dirty(bh);
        clear_buffer_pinned(bh);

        gfs2_log_lock(sdp);
        if (bd->bd_ail) {
                list_del(&bd->bd_ail_st_list);
                brelse(bh);
        } else {
                struct gfs2_glock *gl = bd->bd_gl;
                list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
                atomic_inc(&gl->gl_ail_count);
        }
        bd->bd_ail = ai;
        list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
        gfs2_log_unlock(sdp);
}

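/*
 * Pin/unpin life cycle (summary of the two functions above): a buffer is
 * pinned (dirty bit cleared, pinned bit set) while it sits in the active
 * transaction, so the in-place block cannot be written out before its
 * journal copy.  Once the log copy has been written, gfs2_unpin()
 * re-dirties the buffer and moves it onto the new AIL (ai_ail1_list) for
 * later in-place writeback; the first time a buffer enters the AIL its
 * glock takes an extra reference via gl_ail_count.
 */
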
static void __glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_glock *gl;
        struct gfs2_trans *tr = current->journal_info;

        tr->tr_touched = 1;

        gl = container_of(le, struct gfs2_glock, gl_le);
        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
                return;
        if (!list_empty(&le->le_list))
                return;

        gfs2_glock_hold(gl);
        set_bit(GLF_DIRTY, &gl->gl_flags);
        sdp->sd_log_num_gl++;
        list_add(&le->le_list, &sdp->sd_log_le_gl);
}

static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        gfs2_log_lock(sdp);
        __glock_lo_add(sdp, le);
        gfs2_log_unlock(sdp);
}

static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_gl;
        struct gfs2_glock *gl;

        while (!list_empty(head)) {
                gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
                list_del_init(&gl->gl_le.le_list);
                sdp->sd_log_num_gl--;

                gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
                gfs2_glock_put(gl);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr;

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr))
                goto out;
        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_buf++;
        list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        if (!list_empty(&le->le_list))
                goto out;
        __glock_lo_add(sdp, &bd->bd_gl->gl_le);
        gfs2_meta_check(sdp, bd->bd_bh);
        gfs2_pin(sdp, bd->bd_bh);
        sdp->sd_log_num_buf++;
        list_add(&le->le_list, &sdp->sd_log_le_buf);
        tr->tr_num_buf_new++;
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_list_buf;
        struct gfs2_bufdata *bd;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
                list_del_init(&bd->bd_list_tr);
                tr->tr_num_buf--;
        }
        gfs2_log_unlock(sdp);
        gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct buffer_head *bh;
        struct gfs2_log_descriptor *ld;
        struct gfs2_bufdata *bd1 = NULL, *bd2;
        unsigned int total;
        unsigned int offset = BUF_OFFSET;
        unsigned int limit;
        unsigned int num;
        unsigned int n;
        __be64 *ptr;

        limit = buf_limit(sdp);
        /* for 4k blocks, limit = 503 */

        gfs2_log_lock(sdp);
        total = sdp->sd_log_num_buf;
        bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
        while (total) {
                num = total;
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
                bh = gfs2_log_get_buf(sdp);
                gfs2_log_lock(sdp);
                ld = (struct gfs2_log_descriptor *)bh->b_data;
                ptr = (__be64 *)(bh->b_data + offset);
                ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
                ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
                ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
                ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
                ld->ld_length = cpu_to_be32(num + 1);
                ld->ld_data1 = cpu_to_be32(num);
                ld->ld_data2 = cpu_to_be32(0);
                memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

                n = 0;
                list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
                        if (++n >= num)
                                break;
                }

                gfs2_log_unlock(sdp);
                set_buffer_dirty(bh);
                ll_rw_block(WRITE, 1, &bh);
                gfs2_log_lock(sdp);

                n = 0;
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
                                             bd_le.le_list) {
                        gfs2_log_unlock(sdp);
                        bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }

                BUG_ON(total < num);
                total -= num;
        }
        gfs2_log_unlock(sdp);
}

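/*
 * On-disk layout produced by buf_lo_before_commit() above: each chunk of up
 * to buf_limit(sdp) metadata buffers is preceded by one descriptor block,
 *
 *   +--------------------------------+  offset 0
 *   | struct gfs2_log_descriptor     |  type GFS2_LOG_DESC_METADATA,
 *   |   ld_length = num + 1          |  ld_data1 = num
 *   +--------------------------------+  offset BUF_OFFSET
 *   | __be64 blocknr[num]            |  real block number of each buffer
 *   +--------------------------------+
 *
 * followed by "num" journal blocks, written via gfs2_log_fake_buf(), that
 * carry the current contents of those buffers.
 */
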
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_buf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_buf--;

                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
                               struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_blocks = 0;
        sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                struct gfs2_log_descriptor *ld, __be64 *ptr,
                                int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                if (gfs2_meta_check(sdp, bh_ip))
                        error = -EIO;
                else
                        mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);
                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

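/*
 * Journal replay runs the scan twice: revoke_lo_scan_elements() below acts
 * only on pass 0, collecting revoke tags, while buf_lo_scan_elements() and
 * databuf_lo_scan_elements() act only on pass 1 and consult
 * gfs2_revoke_check() so that revoked blocks are not copied back into
 * place.
 */
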
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_trans *tr;

        tr = current->journal_info;
        tr->tr_touched = 1;
        tr->tr_num_revoke++;

        gfs2_log_lock(sdp);
        sdp->sd_log_num_revoke++;
        list_add(&le->le_list, &sdp->sd_log_le_revoke);
        gfs2_log_unlock(sdp);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
        struct gfs2_log_descriptor *ld;
        struct gfs2_meta_header *mh;
        struct buffer_head *bh;
        unsigned int offset;
        struct list_head *head = &sdp->sd_log_le_revoke;
        struct gfs2_revoke *rv;

        if (!sdp->sd_log_num_revoke)
                return;

        bh = gfs2_log_get_buf(sdp);
        ld = (struct gfs2_log_descriptor *)bh->b_data;
        ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
        ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
        ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
        ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
                                                    sizeof(u64)));
        ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
        ld->ld_data2 = cpu_to_be32(0);
        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
        offset = sizeof(struct gfs2_log_descriptor);

        while (!list_empty(head)) {
                rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
                list_del_init(&rv->rv_le.le_list);
                sdp->sd_log_num_revoke--;

                if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);

                        bh = gfs2_log_get_buf(sdp);
                        mh = (struct gfs2_meta_header *)bh->b_data;
                        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
                        mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
                        mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
                        offset = sizeof(struct gfs2_meta_header);
                }

                *(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
                kfree(rv);

                offset += sizeof(u64);
        }
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

        set_buffer_dirty(bh);
        ll_rw_block(WRITE, 1, &bh);
}

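/*
 * Revoke data as written by revoke_lo_before_commit(): the first journal
 * block carries a GFS2_LOG_DESC_REVOKE descriptor (ld_data1 = number of
 * revokes) followed by packed __be64 block numbers up to sb_bsize;
 * overflow blocks carry only a gfs2_meta_header of type GFS2_METATYPE_LB
 * before the next run of block numbers.  revoke_lo_scan_elements() below
 * walks exactly this layout during replay.
 */
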
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (pass != 0)
                return;

        sdp->sd_found_revokes = 0;
        sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                   struct gfs2_log_descriptor *ld, __be64 *ptr,
                                   int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        unsigned int blks = be32_to_cpu(ld->ld_length);
        unsigned int revokes = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh;
        unsigned int offset;
        u64 blkno;
        int first = 1;
        int error;

        if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
                return 0;

        offset = sizeof(struct gfs2_log_descriptor);

        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;

                if (!first)
                        gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

                while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
                        blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

                        error = gfs2_revoke_add(sdp, blkno, start);
                        if (error < 0)
                                return error;
                        else if (error)
                                sdp->sd_found_revokes++;

                        if (!--revokes)
                                break;
                        offset += sizeof(u64);
                }

                brelse(bh);
                offset = sizeof(struct gfs2_meta_header);
                first = 0;
        }

        return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_revoke_clean(sdp);
                return;
        }
        if (pass != 1)
                return;

        fs_info(sdp, "jid=%u: Found %u revoke tags\n",
                jd->jd_jid, sdp->sd_found_revokes);

        gfs2_revoke_clean(sdp);
}

static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_rgrpd *rgd;
        struct gfs2_trans *tr = current->journal_info;

        tr->tr_touched = 1;

        rgd = container_of(le, struct gfs2_rgrpd, rd_le);

        gfs2_log_lock(sdp);
        if (!list_empty(&le->le_list)) {
                gfs2_log_unlock(sdp);
                return;
        }
        gfs2_rgrp_bh_hold(rgd);
        sdp->sd_log_num_rg++;
        list_add(&le->le_list, &sdp->sd_log_le_rg);
        gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_rg;
        struct gfs2_rgrpd *rgd;

        while (!list_empty(head)) {
                rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
                list_del_init(&rgd->rd_le.le_list);
                sdp->sd_log_num_rg--;

                gfs2_rgrp_repolish_clones(rgd);
                gfs2_rgrp_bh_put(rgd);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
        struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
        struct gfs2_trans *tr = current->journal_info;
        struct address_space *mapping = bd->bd_bh->b_page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);

        lock_buffer(bd->bd_bh);
        gfs2_log_lock(sdp);
        if (!list_empty(&bd->bd_list_tr))
                goto out;
        tr->tr_touched = 1;
        if (gfs2_is_jdata(ip)) {
                tr->tr_num_buf++;
                list_add(&bd->bd_list_tr, &tr->tr_list_buf);
        }
        if (!list_empty(&le->le_list))
                goto out;

        __glock_lo_add(sdp, &bd->bd_gl->gl_le);
        if (gfs2_is_jdata(ip)) {
                gfs2_pin(sdp, bd->bd_bh);
                tr->tr_num_databuf_new++;
                sdp->sd_log_num_jdata++;
        }
        sdp->sd_log_num_databuf++;
        list_add(&le->le_list, &sdp->sd_log_le_databuf);
out:
        gfs2_log_unlock(sdp);
        unlock_buffer(bd->bd_bh);
}

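/*
 * For ordered-write data the buffer is merely queued on sd_log_le_databuf
 * here; databuf_lo_before_commit() later writes it back with ll_rw_block()
 * and waits for it before the log commits, so it never appears in the
 * journal itself.  Journaled (jdata) buffers are pinned like metadata and
 * accounted in sd_log_num_jdata as well as sd_log_num_databuf.
 */
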
static int gfs2_check_magic(struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        void *kaddr;
        __be32 *ptr;
        int rv = 0;

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                rv = 1;
        kunmap_atomic(kaddr, KM_USER0);
        return rv;
}

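/*
 * Escaping: a data block that happens to begin with GFS2_MAGIC would be
 * indistinguishable from metadata when the journal is scanned, so
 * databuf_lo_before_commit() zeroes the first __be32 of the log copy and
 * sets the escape flag in the block's tag; databuf_lo_scan_elements()
 * restores the magic number when such a block is replayed.
 */
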
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
        LIST_HEAD(started);
        struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
        struct buffer_head *bh = NULL, *bh1 = NULL;
        struct gfs2_log_descriptor *ld;
        unsigned int limit;
        unsigned int total_dbuf;
        unsigned int total_jdata;
        unsigned int num, n;
        __be64 *ptr = NULL;

        limit = databuf_limit(sdp);

        /*
         * Start writing ordered buffers, write journaled buffers
         * into the log along with a header
         */
        gfs2_log_lock(sdp);
        total_dbuf = sdp->sd_log_num_databuf;
        total_jdata = sdp->sd_log_num_jdata;
        bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
                                       bd_le.le_list);
        while (total_dbuf) {
                num = total_jdata;
                if (num > limit)
                        num = limit;
                n = 0;
                list_for_each_entry_safe_continue(bd1, bdt,
                                                  &sdp->sd_log_le_databuf,
                                                  bd_le.le_list) {
                        /* store off the buffer head in a local ptr since
                         * gfs2_bufdata might change when we drop the log lock
                         */
                        bh1 = bd1->bd_bh;

                        /* An ordered write buffer */
                        if (bh1 && !buffer_pinned(bh1)) {
                                list_move(&bd1->bd_le.le_list, &started);
                                if (bd1 == bd2) {
                                        bd2 = NULL;
                                        bd2 = list_prepare_entry(bd2,
                                                        &sdp->sd_log_le_databuf,
                                                        bd_le.le_list);
                                }
                                total_dbuf--;
                                if (bh1) {
                                        if (buffer_dirty(bh1)) {
                                                get_bh(bh1);
                                                gfs2_log_unlock(sdp);
                                                ll_rw_block(SWRITE, 1, &bh1);
                                                brelse(bh1);
                                                gfs2_log_lock(sdp);
                                        }
                                        continue;
                                }
                                continue;
                        } else if (bh1) { /* A journaled buffer */
                                int magic;
                                gfs2_log_unlock(sdp);
                                if (!bh) {
                                        bh = gfs2_log_get_buf(sdp);
                                        ld = (struct gfs2_log_descriptor *)
                                                                bh->b_data;
                                        ptr = (__be64 *)(bh->b_data +
                                                         DATABUF_OFFSET);
                                        ld->ld_header.mh_magic =
                                                cpu_to_be32(GFS2_MAGIC);
                                        ld->ld_header.mh_type =
                                                cpu_to_be32(GFS2_METATYPE_LD);
                                        ld->ld_header.mh_format =
                                                cpu_to_be32(GFS2_FORMAT_LD);
                                        ld->ld_type =
                                                cpu_to_be32(GFS2_LOG_DESC_JDATA);
                                        ld->ld_length = cpu_to_be32(num + 1);
                                        ld->ld_data1 = cpu_to_be32(num);
                                        ld->ld_data2 = cpu_to_be32(0);
                                        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
                                }
                                magic = gfs2_check_magic(bh1);
                                *ptr++ = cpu_to_be64(bh1->b_blocknr);
                                *ptr++ = cpu_to_be64((__u64)magic);
                                clear_buffer_escaped(bh1);
                                if (unlikely(magic != 0))
                                        set_buffer_escaped(bh1);
                                gfs2_log_lock(sdp);
                                if (++n >= num)
                                        break;
                        } else if (!bh1) {
                                total_dbuf--;
                                sdp->sd_log_num_databuf--;
                                list_del_init(&bd1->bd_le.le_list);
                                if (bd1 == bd2) {
                                        bd2 = NULL;
                                        bd2 = list_prepare_entry(bd2,
                                                &sdp->sd_log_le_databuf,
                                                bd_le.le_list);
                                }
                                kmem_cache_free(gfs2_bufdata_cachep, bd1);
                        }
                }
                gfs2_log_unlock(sdp);
                if (bh) {
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        bh = NULL;
                }
                n = 0;
                gfs2_log_lock(sdp);
                list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
                                             bd_le.le_list) {
                        if (!bd2->bd_bh)
                                continue;
                        /* copy buffer if it needs escaping */
                        gfs2_log_unlock(sdp);
                        if (unlikely(buffer_escaped(bd2->bd_bh))) {
                                void *kaddr;
                                struct page *page = bd2->bd_bh->b_page;
                                bh = gfs2_log_get_buf(sdp);
                                kaddr = kmap_atomic(page, KM_USER0);
                                memcpy(bh->b_data,
                                       kaddr + bh_offset(bd2->bd_bh),
                                       sdp->sd_sb.sb_bsize);
                                kunmap_atomic(kaddr, KM_USER0);
                                *(__be32 *)bh->b_data = 0;
                        } else {
                                bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
                        }
                        set_buffer_dirty(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);
                        if (++n >= num)
                                break;
                }
                bh = NULL;
                BUG_ON(total_dbuf < num);
                total_dbuf -= num;
                total_jdata -= num;
        }
        gfs2_log_unlock(sdp);

        /* Wait on all ordered buffers */
        while (!list_empty(&started)) {
                gfs2_log_lock(sdp);
                bd1 = list_entry(started.next, struct gfs2_bufdata,
                                 bd_le.le_list);
                list_del_init(&bd1->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                bh1 = bd1->bd_bh;
                if (bh1) {
                        bh1->b_private = NULL;
                        get_bh(bh1);
                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh1);
                        brelse(bh1);
                } else
                        gfs2_log_unlock(sdp);

                kmem_cache_free(gfs2_bufdata_cachep, bd1);
        }

        /* We've removed all the ordered write bufs here, so only jdata left */
        gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    struct gfs2_log_descriptor *ld,
                                    __be64 *ptr, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
        u64 blkno;
        u64 esc;
        int error = 0;

        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;

        gfs2_replay_incr_blk(sdp, &start);
        for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);

                sdp->sd_found_blocks++;

                if (gfs2_revoke_check(sdp, blkno, start))
                        continue;

                error = gfs2_replay_read_block(jd, start, &bh_log);
                if (error)
                        return error;

                bh_ip = gfs2_meta_new(gl, blkno);
                memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

                /* Unescape */
                if (esc) {
                        __be32 *eptr = (__be32 *)bh_ip->b_data;
                        *eptr = cpu_to_be32(GFS2_MAGIC);
                }
                mark_buffer_dirty(bh_ip);

                brelse(bh_log);
                brelse(bh_ip);
                if (error)
                        break;

                sdp->sd_replayed_blocks++;
        }

        return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

        if (error) {
                gfs2_meta_sync(ip->i_gl);
                return;
        }
        if (pass != 1)
                return;

        gfs2_meta_sync(ip->i_gl);

        fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
                jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &sdp->sd_log_le_databuf;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
                list_del_init(&bd->bd_le.le_list);
                sdp->sd_log_num_databuf--;
                sdp->sd_log_num_jdata--;
                gfs2_unpin(sdp, bd->bd_bh, ai);
        }
        gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

const struct gfs2_log_operations gfs2_glock_lops = {
        .lo_add = glock_lo_add,
        .lo_after_commit = glock_lo_after_commit,
        .lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
        .lo_add = buf_lo_add,
        .lo_incore_commit = buf_lo_incore_commit,
        .lo_before_commit = buf_lo_before_commit,
        .lo_after_commit = buf_lo_after_commit,
        .lo_before_scan = buf_lo_before_scan,
        .lo_scan_elements = buf_lo_scan_elements,
        .lo_after_scan = buf_lo_after_scan,
        .lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
        .lo_add = revoke_lo_add,
        .lo_before_commit = revoke_lo_before_commit,
        .lo_before_scan = revoke_lo_before_scan,
        .lo_scan_elements = revoke_lo_scan_elements,
        .lo_after_scan = revoke_lo_after_scan,
        .lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
        .lo_add = rg_lo_add,
        .lo_after_commit = rg_lo_after_commit,
        .lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
        .lo_add = databuf_lo_add,
        .lo_incore_commit = buf_lo_incore_commit,
        .lo_before_commit = databuf_lo_before_commit,
        .lo_after_commit = databuf_lo_after_commit,
        .lo_scan_elements = databuf_lo_scan_elements,
        .lo_after_scan = databuf_lo_after_scan,
        .lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
        &gfs2_glock_lops,
        &gfs2_databuf_lops,
        &gfs2_buf_lops,
        &gfs2_rg_lops,
        &gfs2_revoke_lops,
        NULL,
};