1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2005
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
8 * jfs_txnmgr.c: transaction manager
11 * transaction starts with txBegin() and ends with txCommit()
14 * tlock is acquired at the time of update;
15 * (obviate scan at commit time for xtree and dtree)
16 * tlock and mp points to each other;
17 * (no hashlist for mp -> tlock).
20 * tlock on in-memory inode:
21 * in-place tlock in the in-memory inode itself;
22 * converted to page lock by iWrite() at commit time.
24 * tlock during write()/mmap() under anonymous transaction (tid = 0):
25 * transferred (?) to transaction at commit time.
27 * use the page itself to update allocation maps
28 * (obviate intermediate replication of allocation/deallocation data)
29 * hold on to mp+lock thru update of maps
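*
* A minimal usage sketch of the lifecycle above (illustrative only; "sb",
* "ip" and "mp" stand for the caller's superblock, inode and metapage):
*
*	tid = txBegin(sb, 0);			allocate tid/tblock
*	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
*	... update the page ...
*	iplist[0] = ip;
*	rc = txCommit(tid, 1, iplist, 0);	write log records
*	txEnd(tid);				free tid/tblock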
33 #include <linux/vmalloc.h>
34 #include <linux/completion.h>
35 #include <linux/freezer.h>
36 #include <linux/module.h>
37 #include <linux/moduleparam.h>
38 #include <linux/kthread.h>
39 #include <linux/seq_file.h>
40 #include "jfs_incore.h"
41 #include "jfs_inode.h"
42 #include "jfs_filsys.h"
43 #include "jfs_metapage.h"
44 #include "jfs_dinode.h"
47 #include "jfs_superblock.h"
48 #include "jfs_debug.h"
51 * transaction management structures
54 int freetid; /* index of a free tid structure */
55 int freelock; /* index first free lock word */
56 wait_queue_head_t freewait; /* eventlist of free tblock */
57 wait_queue_head_t freelockwait; /* eventlist of free tlock */
58 wait_queue_head_t lowlockwait; /* eventlist of ample tlocks */
59 int tlocksInUse; /* Number of tlocks in use */
60 spinlock_t LazyLock; /* synchronize sync_queue & unlock_queue */
61 /* struct tblock *sync_queue; * Transactions waiting for data sync */
62 struct list_head unlock_queue; /* Txns waiting to be released */
63 struct list_head anon_list; /* inodes having anonymous txns */
64 struct list_head anon_list2; /* inodes having anonymous txns
65 that couldn't be sync'ed */
68 int jfs_tlocks_low; /* Indicates low number of available tlocks */
70 #ifdef CONFIG_JFS_STATISTICS
74 uint txBegin_lockslow;
77 uint txBeginAnon_barrier;
78 uint txBeginAnon_lockslow;
80 uint txLockAlloc_freelock;
84 static int nTxBlock = -1; /* number of transaction blocks */
85 module_param(nTxBlock, int, 0);
86 MODULE_PARM_DESC(nTxBlock,
87 "Number of transaction blocks (max:65536)");
89 static int nTxLock = -1; /* number of transaction locks */
90 module_param(nTxLock, int, 0);
91 MODULE_PARM_DESC(nTxLock,
92 "Number of transaction locks (max:65536)");
94 struct tblock *TxBlock; /* transaction block table */
95 static int TxLockLWM; /* Low water mark for number of txLocks used */
96 static int TxLockHWM; /* High water mark for number of txLocks used */
97 static int TxLockVHWM; /* Very High water mark */
98 struct tlock *TxLock; /* transaction lock table */
101 * transaction management lock
103 static DEFINE_SPINLOCK(jfsTxnLock);
105 #define TXN_LOCK() spin_lock(&jfsTxnLock)
106 #define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
108 #define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock)
109 #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
110 #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
112 static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
113 static int jfs_commit_thread_waking;
116 * Retry logic exists outside these macros to protect against spurious wakeups.
118 static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
120 DECLARE_WAITQUEUE(wait, current);
122 add_wait_queue(event, &wait);
123 set_current_state(TASK_UNINTERRUPTIBLE);
126 remove_wait_queue(event, &wait);
129 #define TXN_SLEEP(event)\
131 TXN_SLEEP_DROP_LOCK(event);\
135 #define TXN_WAKEUP(event) wake_up_all(event)
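/*
 * Minimal sketch of the canonical caller pattern (it mirrors txLockAlloc()
 * below): recheck the condition in a loop, since TXN_SLEEP() drops and
 * reacquires jfsTxnLock and the wakeup may be spurious:
 *
 *	TXN_LOCK();
 *	while (!(lid = TxAnchor.freelock))
 *		TXN_SLEEP(&TxAnchor.freelockwait);
 *	...
 *	TXN_UNLOCK();
 */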
141 tid_t maxtid; /* 4: biggest tid ever used */
142 lid_t maxlid; /* 4: biggest lid ever used */
143 int ntid; /* 4: # of transactions performed */
144 int nlid; /* 4: # of tlocks acquired */
145 int waitlock; /* 4: # of tlock wait */
151 static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
152 struct tlock *tlck, struct commit *cd);
153 static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
155 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
156 struct tlock * tlck);
157 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
158 struct tlock * tlck);
159 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
160 struct tblock * tblk);
161 static void txForce(struct tblock * tblk);
162 static void txLog(struct jfs_log *log, struct tblock *tblk,
164 static void txUpdateMap(struct tblock * tblk);
165 static void txRelease(struct tblock * tblk);
166 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
167 struct tlock * tlck);
168 static void LogSyncRelease(struct metapage * mp);
171 * transaction block/lock management
172 * ---------------------------------
176 * Get a transaction lock from the free list. If the number in use is
177 * greater than the high water mark, wake up the sync daemon. This should
178 * free some anonymous transaction locks. (TXN_LOCK must be held.)
180 static lid_t txLockAlloc(void)
184 INCREMENT(TxStat.txLockAlloc);
185 if (!TxAnchor.freelock) {
186 INCREMENT(TxStat.txLockAlloc_freelock);
189 while (!(lid = TxAnchor.freelock))
190 TXN_SLEEP(&TxAnchor.freelockwait);
191 TxAnchor.freelock = TxLock[lid].next;
192 HIGHWATERMARK(stattx.maxlid, lid);
193 if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
194 jfs_info("txLockAlloc tlocks low");
196 wake_up_process(jfsSyncThread);
202 static void txLockFree(lid_t lid)
205 TxLock[lid].next = TxAnchor.freelock;
206 TxAnchor.freelock = lid;
207 TxAnchor.tlocksInUse--;
208 if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
209 jfs_info("txLockFree jfs_tlocks_low no more");
211 TXN_WAKEUP(&TxAnchor.lowlockwait);
213 TXN_WAKEUP(&TxAnchor.freelockwait);
219 * FUNCTION: initialize transaction management structures
223 * serialization: single thread at jfs_init()
230 /* Set defaults for nTxLock and nTxBlock if unset */
233 if (nTxBlock == -1) {
234 /* Base default on memory size */
236 if (si.totalram > (256 * 1024)) /* 1 GB */
239 nTxLock = si.totalram >> 2;
240 } else if (nTxBlock > (8 * 1024))
243 nTxLock = nTxBlock << 3;
246 nTxBlock = nTxLock >> 3;
248 /* Verify tunable parameters */
250 nTxBlock = 16; /* No one should set it this low */
251 if (nTxBlock > 65536)
254 nTxLock = 256; /* No one should set it this low */
258 printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
261 * initialize transaction block (tblock) table
263 * transaction id (tid) = tblock index
264 * tid = 0 is reserved.
266 TxLockLWM = (nTxLock * 4) / 10;
267 TxLockHWM = (nTxLock * 7) / 10;
268 TxLockVHWM = (nTxLock * 8) / 10;
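	/*
	 * Worked example: if nTxLock resolves to 4096, the marks come out
	 * as TxLockLWM = 1638, TxLockHWM = 2867 and TxLockVHWM = 3276;
	 * new transactions stall above ~80% tlock usage until the sync
	 * daemon pushes anonymous transactions out and usage falls back
	 * under ~40%.
	 */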
270 size = sizeof(struct tblock) * nTxBlock;
271 TxBlock = vmalloc(size);
275 for (k = 1; k < nTxBlock - 1; k++) {
276 TxBlock[k].next = k + 1;
277 init_waitqueue_head(&TxBlock[k].gcwait);
278 init_waitqueue_head(&TxBlock[k].waitor);
281 init_waitqueue_head(&TxBlock[k].gcwait);
282 init_waitqueue_head(&TxBlock[k].waitor);
284 TxAnchor.freetid = 1;
285 init_waitqueue_head(&TxAnchor.freewait);
287 stattx.maxtid = 1; /* statistics */
290 * initialize transaction lock (tlock) table
292 * transaction lock id = tlock index
293 * tlock id = 0 is reserved.
295 size = sizeof(struct tlock) * nTxLock;
296 TxLock = vmalloc(size);
297 if (TxLock == NULL) {
302 /* initialize tlock table */
303 for (k = 1; k < nTxLock - 1; k++)
304 TxLock[k].next = k + 1;
306 init_waitqueue_head(&TxAnchor.freelockwait);
307 init_waitqueue_head(&TxAnchor.lowlockwait);
309 TxAnchor.freelock = 1;
310 TxAnchor.tlocksInUse = 0;
311 INIT_LIST_HEAD(&TxAnchor.anon_list);
312 INIT_LIST_HEAD(&TxAnchor.anon_list2);
315 INIT_LIST_HEAD(&TxAnchor.unlock_queue);
317 stattx.maxlid = 1; /* statistics */
325 * FUNCTION: clean up when module is unloaded
338 * FUNCTION: start a transaction.
340 * PARAMETER: sb - superblock
341 * flag - force for nested tx;
343 * RETURN: tid - transaction id
345 * note: the force flag allows starting a tx for a nested tx
346 * to prevent deadlock on logsync barrier;
348 tid_t txBegin(struct super_block *sb, int flag)
354 jfs_info("txBegin: flag = 0x%x", flag);
355 log = JFS_SBI(sb)->log;
358 jfs_error(sb, "read-only filesystem\n");
364 INCREMENT(TxStat.txBegin);
367 if (!(flag & COMMIT_FORCE)) {
369 * synchronize with logsync barrier
371 if (test_bit(log_SYNCBARRIER, &log->flag) ||
372 test_bit(log_QUIESCE, &log->flag)) {
373 INCREMENT(TxStat.txBegin_barrier);
374 TXN_SLEEP(&log->syncwait);
380 * Don't begin transaction if we're getting starved for tlocks
381 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
384 if (TxAnchor.tlocksInUse > TxLockVHWM) {
385 INCREMENT(TxStat.txBegin_lockslow);
386 TXN_SLEEP(&TxAnchor.lowlockwait);
392 * allocate transaction id/block
394 if ((t = TxAnchor.freetid) == 0) {
395 jfs_info("txBegin: waiting for free tid");
396 INCREMENT(TxStat.txBegin_freetid);
397 TXN_SLEEP(&TxAnchor.freewait);
401 tblk = tid_to_tblock(t);
403 if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
404 /* Don't let a non-forced transaction take the last tblk */
405 jfs_info("txBegin: waiting for free tid");
406 INCREMENT(TxStat.txBegin_freetid);
407 TXN_SLEEP(&TxAnchor.freewait);
411 TxAnchor.freetid = tblk->next;
414 * initialize transaction
418 * We can't zero the whole thing or we'd screw up another thread being
419 * awakened after sleeping on tblk->waitor
421 * memset(tblk, 0, sizeof(struct tblock));
423 tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
427 tblk->logtid = log->logtid;
431 HIGHWATERMARK(stattx.maxtid, t); /* statistics */
432 INCREMENT(stattx.ntid); /* statistics */
436 jfs_info("txBegin: returning tid = %d", t);
442 * NAME: txBeginAnon()
444 * FUNCTION: start an anonymous transaction.
445 * Blocks if logsync or available tlocks are low to prevent
446 * anonymous tlocks from depleting supply.
448 * PARAMETER: sb - superblock
452 void txBeginAnon(struct super_block *sb)
456 log = JFS_SBI(sb)->log;
459 INCREMENT(TxStat.txBeginAnon);
463 * synchronize with logsync barrier
465 if (test_bit(log_SYNCBARRIER, &log->flag) ||
466 test_bit(log_QUIESCE, &log->flag)) {
467 INCREMENT(TxStat.txBeginAnon_barrier);
468 TXN_SLEEP(&log->syncwait);
473 * Don't begin transaction if we're getting starved for tlocks
475 if (TxAnchor.tlocksInUse > TxLockVHWM) {
476 INCREMENT(TxStat.txBeginAnon_lockslow);
477 TXN_SLEEP(&TxAnchor.lowlockwait);
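/*
 * Sketch (compiled out): the write()/mmap() path throttles itself here and
 * then takes its tlocks anonymously under tid 0; txCommit() later adopts
 * them from the inode's anonymous tlock list (see txLock()/txCommit()).
 */
#if 0
	struct tlock *tlck;

	txBeginAnon(ip->i_sb);		/* may sleep: barrier or tlocks low */
	tlck = txLock(0, ip, mp, tlckXTREE | tlckGROW);	/* anonymous tlock */
#endif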
486 * function: free specified transaction block.
488 * logsync barrier processing:
492 void txEnd(tid_t tid)
494 struct tblock *tblk = tid_to_tblock(tid);
497 jfs_info("txEnd: tid = %d", tid);
501 * wakeup transactions waiting on the page locked
502 * by the current transaction
504 TXN_WAKEUP(&tblk->waitor);
506 log = JFS_SBI(tblk->sb)->log;
509 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
510 * otherwise, we would be left with a transaction that may have been
513 * Lazy commit thread will turn off tblkGC_LAZY before calling this
516 if (tblk->flag & tblkGC_LAZY) {
517 jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
520 spin_lock_irq(&log->gclock); // LOGGC_LOCK
521 tblk->flag |= tblkGC_UNLOCKED;
522 spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
526 jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
528 assert(tblk->next == 0);
531 * insert tblock back on freelist
533 tblk->next = TxAnchor.freetid;
534 TxAnchor.freetid = tid;
537 * mark the tblock not active
539 if (--log->active == 0) {
540 clear_bit(log_FLUSH, &log->flag);
543 * synchronize with logsync barrier
545 if (test_bit(log_SYNCBARRIER, &log->flag)) {
548 /* write dirty metadata & forward log syncpt */
551 jfs_info("log barrier off: 0x%x", log->lsn);
553 /* enable new transactions start */
554 clear_bit(log_SYNCBARRIER, &log->flag);
556 /* wakeup all waiters for logsync barrier */
557 TXN_WAKEUP(&log->syncwait);
566 * wakeup all waiters for a free tblock
568 TXN_WAKEUP(&TxAnchor.freewait);
574 * function: acquire a transaction lock on the specified <mp>
578 * return: transaction lock id
582 struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
585 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
590 struct xtlock *xtlck;
591 struct linelock *linelock;
597 if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
598 !(mp->xflag & COMMIT_PAGE)) {
600 * Directory inode is special. It can have both an xtree tlock
601 * and a dtree tlock associated with it.
608 /* is page not locked by a transaction ? */
612 jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
614 /* is page locked by the requester transaction ? */
615 tlck = lid_to_tlock(lid);
616 if ((xtid = tlck->tid) == tid) {
622 * is page locked by anonymous transaction/lock ?
624 * (page update without transaction (i.e., file write) is
625 * locked under anonymous transaction tid = 0:
626 * anonymous tlocks maintained on anonymous tlock list of
627 * the inode of the page and available to all anonymous
628 * transactions until txCommit() time at which point
629 * they are transferred to the transaction tlock list of
630 * the committing transaction of the inode)
635 tblk = tid_to_tblock(tid);
637 * The order of the tlocks in the transaction is important
638 * (during truncate, child xtree pages must be freed before
639 * parent's tlocks change the working map).
640 * Take tlock off anonymous list and add to tail of
643 * Note: We really need to get rid of the tid & lid and
644 * use list_head's. This code is getting UGLY!
646 if (jfs_ip->atlhead == lid) {
647 if (jfs_ip->atltail == lid) {
648 /* only anonymous txn.
649 * Remove from anon_list
652 list_del_init(&jfs_ip->anon_inode_list);
655 jfs_ip->atlhead = tlck->next;
658 for (last = jfs_ip->atlhead;
659 lid_to_tlock(last)->next != lid;
660 last = lid_to_tlock(last)->next) {
663 lid_to_tlock(last)->next = tlck->next;
664 if (jfs_ip->atltail == lid)
665 jfs_ip->atltail = last;
668 /* insert the tlock at tail of transaction tlock list */
671 lid_to_tlock(tblk->last)->next = lid;
687 tlck = lid_to_tlock(lid);
696 /* mark tlock for meta-data page */
697 if (mp->xflag & COMMIT_PAGE) {
699 tlck->flag = tlckPAGELOCK;
701 /* mark the page dirty and nohomeok */
702 metapage_nohomeok(mp);
704 jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
705 mp, mp->nohomeok, tid, tlck);
707 /* if anonymous transaction, and buffer is on the group
708 * commit synclist, mark inode to show this. This will
709 * prevent the buffer from being marked nohomeok for too
712 if ((tid == 0) && mp->lsn)
713 set_cflag(COMMIT_Synclist, ip);
715 /* mark tlock for in-memory inode */
717 tlck->flag = tlckINODELOCK;
719 if (S_ISDIR(ip->i_mode))
720 tlck->flag |= tlckDIRECTORY;
724 /* bind the tlock and the page */
733 * enqueue transaction lock to transaction/inode
735 /* insert the tlock at tail of transaction tlock list */
737 tblk = tid_to_tblock(tid);
739 lid_to_tlock(tblk->last)->next = lid;
745 /* anonymous transaction:
746 * insert the tlock at head of inode anonymous tlock list
749 tlck->next = jfs_ip->atlhead;
750 jfs_ip->atlhead = lid;
751 if (tlck->next == 0) {
752 /* This inode's first anonymous transaction */
753 jfs_ip->atltail = lid;
755 list_add_tail(&jfs_ip->anon_inode_list,
756 &TxAnchor.anon_list);
761 /* initialize type dependent area for linelock */
762 linelock = (struct linelock *) & tlck->lock;
764 linelock->flag = tlckLINELOCK;
765 linelock->maxcnt = TLOCKSHORT;
768 switch (type & tlckTYPE) {
770 linelock->l2linesize = L2DTSLOTSIZE;
774 linelock->l2linesize = L2XTSLOTSIZE;
776 xtlck = (struct xtlock *) linelock;
777 xtlck->header.offset = 0;
778 xtlck->header.length = 2;
780 if (type & tlckNEW) {
781 xtlck->lwm.offset = XTENTRYSTART;
783 if (mp->xflag & COMMIT_PAGE)
784 p = (xtpage_t *) mp->data;
786 p = &jfs_ip->i_xtroot;
788 le16_to_cpu(p->header.nextindex);
790 xtlck->lwm.length = 0; /* ! */
791 xtlck->twm.offset = 0;
792 xtlck->hwm.offset = 0;
798 linelock->l2linesize = L2INODESLOTSIZE;
802 linelock->l2linesize = L2DATASLOTSIZE;
806 jfs_err("UFO tlock:0x%p", tlck);
810 * update tlock vector
818 * page is being locked by another transaction:
821 /* Only locks on ipimap or ipaimap should reach here */
822 /* assert(jfs_ip->fileset == AGGREGATE_I); */
823 if (jfs_ip->fileset != AGGREGATE_I) {
824 printk(KERN_ERR "txLock: trying to lock locked page!");
825 print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
827 print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
829 print_hex_dump(KERN_ERR, "Locker's tblock: ",
830 DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
831 sizeof(struct tblock), 0);
832 print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
833 tlck, sizeof(*tlck), 0);
836 INCREMENT(stattx.waitlock); /* statistics */
838 release_metapage(mp);
840 xtid = tlck->tid; /* reacquire after dropping TXN_LOCK */
842 jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
845 /* Recheck everything since dropping TXN_LOCK */
846 if (xtid && (tlck->mp == mp) && (mp->lid == lid))
847 TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
850 jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);
858 * FUNCTION: Release buffers associated with transaction locks, but don't
859 * mark homeok yet. This allows other transactions to modify
860 * buffers, but won't let them go to disk until commit record
861 * actually gets written.
866 * RETURN: Errors from subroutines.
868 static void txRelease(struct tblock * tblk)
876 for (lid = tblk->next; lid; lid = tlck->next) {
877 tlck = lid_to_tlock(lid);
878 if ((mp = tlck->mp) != NULL &&
879 (tlck->type & tlckBTROOT) == 0) {
880 assert(mp->xflag & COMMIT_PAGE);
886 * wakeup transactions waiting on a page locked
887 * by the current transaction
889 TXN_WAKEUP(&tblk->waitor);
897 * FUNCTION: Initiates pageout of pages modified by tid in journalled
898 * objects and frees their lockwords.
900 static void txUnlock(struct tblock * tblk)
903 struct linelock *linelock;
904 lid_t lid, next, llid, k;
910 jfs_info("txUnlock: tblk = 0x%p", tblk);
911 log = JFS_SBI(tblk->sb)->log;
914 * mark page under tlock homeok (its log has been written):
916 for (lid = tblk->next; lid; lid = next) {
917 tlck = lid_to_tlock(lid);
920 jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
922 /* unbind page from tlock */
923 if ((mp = tlck->mp) != NULL &&
924 (tlck->type & tlckBTROOT) == 0) {
925 assert(mp->xflag & COMMIT_PAGE);
931 assert(mp->nohomeok > 0);
932 _metapage_homeok(mp);
934 /* inherit younger/larger clsn */
935 LOGSYNC_LOCK(log, flags);
937 logdiff(difft, tblk->clsn, log);
938 logdiff(diffp, mp->clsn, log);
940 mp->clsn = tblk->clsn;
942 mp->clsn = tblk->clsn;
943 LOGSYNC_UNLOCK(log, flags);
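			/*
			 * Worked example for the clsn inheritance above,
			 * with hypothetical lsn values: logdiff() yields
			 * the distance past log->syncpt modulo the log
			 * size, so with syncpt 0xf00 and logsize 0x1000,
			 * clsn 0x100 maps to 0x200 and clsn 0xf80 to 0x80;
			 * the page keeps 0x100, the younger (larger-diff)
			 * clsn.
			 */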
945 assert(!(tlck->flag & tlckFREEPAGE));
950 /* insert tlock, and linelock(s) of the tlock if any,
951 * at head of freelist
955 llid = ((struct linelock *) & tlck->lock)->next;
957 linelock = (struct linelock *) lid_to_tlock(llid);
966 tblk->next = tblk->last = 0;
969 * remove tblock from logsynclist
970 * (allocation map pages inherited lsn of tblk and
971 * have been inserted in the logsync list at txUpdateMap())
974 LOGSYNC_LOCK(log, flags);
976 list_del(&tblk->synclist);
977 LOGSYNC_UNLOCK(log, flags);
984 * function: allocate a transaction lock for freed page/entry;
985 * for freed page, maplock is used as xtlock/dtlock type;
987 struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
989 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
993 struct maplock *maplock;
1000 lid = txLockAlloc();
1001 tlck = lid_to_tlock(lid);
1008 /* bind the tlock and the object */
1009 tlck->flag = tlckINODELOCK;
1010 if (S_ISDIR(ip->i_mode))
1011 tlck->flag |= tlckDIRECTORY;
1018 * enqueue transaction lock to transaction/inode
1020 /* insert the tlock at tail of transaction tlock list */
1022 tblk = tid_to_tblock(tid);
1024 lid_to_tlock(tblk->last)->next = lid;
1030 /* anonymous transaction:
1031 * insert the tlock at head of inode anonymous tlock list
1034 tlck->next = jfs_ip->atlhead;
1035 jfs_ip->atlhead = lid;
1036 if (tlck->next == 0) {
1037 /* This inode's first anonymous transaction */
1038 jfs_ip->atltail = lid;
1039 list_add_tail(&jfs_ip->anon_inode_list,
1040 &TxAnchor.anon_list);
1046 /* initialize type dependent area for maplock */
1047 maplock = (struct maplock *) & tlck->lock;
1049 maplock->maxcnt = 0;
1058 * function: allocate a transaction lock for log vector list
1060 struct linelock *txLinelock(struct linelock * tlock)
1064 struct linelock *linelock;
1068 /* allocate a TxLock structure */
1069 lid = txLockAlloc();
1070 tlck = lid_to_tlock(lid);
1074 /* initialize linelock */
1075 linelock = (struct linelock *) tlck;
1077 linelock->flag = tlckLINELOCK;
1078 linelock->maxcnt = TLOCKLONG;
1079 linelock->index = 0;
1080 if (tlck->flag & tlckDIRECTORY)
1081 linelock->flag |= tlckDIRECTORY;
1083 /* append linelock after tlock */
1084 linelock->next = tlock->next;
1091 * transaction commit management
1092 * -----------------------------
1098 * FUNCTION: commit the changes to the objects specified in
1099 * clist. For journalled segments only the
1100 * changes of the caller are committed, ie by tid.
1101 * for non-journalled segments the data are flushed to
1102 * disk and then the change to the disk inode and indirect
1103 * blocks committed (so blocks newly allocated to the
1104 * segment will be made a part of the segment atomically).
1106 * all of the segments specified in clist must be in
1107 * one file system. no more than 6 segments are needed
1108 * to handle all unix svcs.
1110 * if the i_nlink field (i.e. disk inode link count)
1111 * is zero, and the type of inode is a regular file or
1112 * directory, or symbolic link, the inode is truncated
1113 * to zero length. the truncation is committed but the
1114 * VM resources are unaffected until it is closed (see
1122 * on entry the inode lock on each segment is assumed
1127 int txCommit(tid_t tid, /* transaction identifier */
1128 int nip, /* number of inodes to commit */
1129 struct inode **iplist, /* list of inode to commit */
1134 struct jfs_log *log;
1135 struct tblock *tblk;
1138 struct jfs_inode_info *jfs_ip;
1141 struct super_block *sb;
1143 jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
1144 /* is read-only file system ? */
1145 if (isReadOnly(iplist[0])) {
1150 sb = cd.sb = iplist[0]->i_sb;
1154 tid = txBegin(sb, 0);
1155 tblk = tid_to_tblock(tid);
1158 * initialize commit structure
1160 log = JFS_SBI(sb)->log;
1163 /* initialize log record descriptor in commit */
1165 lrd->logtid = cpu_to_le32(tblk->logtid);
1168 tblk->xflag |= flag;
1170 if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
1171 tblk->xflag |= COMMIT_LAZY;
1173 * prepare non-journaled objects for commit
1175 * flush data pages of non-journaled file
1176 * to prevent the file from getting uninitialized disk blocks
1184 * acquire transaction lock on (on-disk) inodes
1186 * update on-disk inode from in-memory inode
1187 * acquiring transaction locks for AFTER records
1188 * on the on-disk inode of file object
1190 * sort the inodes array by inode number in descending order
1191 * to prevent deadlock when acquiring transaction lock
1192 * of on-disk inodes on multiple on-disk inode pages by
1193 * multiple concurrent transactions
1195 for (k = 0; k < cd.nip; k++) {
1196 top = (cd.iplist[k])->i_ino;
1197 for (n = k + 1; n < cd.nip; n++) {
1199 if (ip->i_ino > top) {
1201 cd.iplist[n] = cd.iplist[k];
1207 jfs_ip = JFS_IP(ip);
1210 * BUGBUG - This code has temporarily been removed. The
1211 * intent is to ensure that any file data is written before
1212 * the metadata is committed to the journal. This prevents
1213 * uninitialized data from appearing in a file after the
1214 * journal has been replayed. (The uninitialized data
1215 * could be sensitive data removed by another user.)
1217 * The problem now is that we are holding the IWRITELOCK
1218 * on the inode, and calling filemap_fdatawrite on an
1219 * unmapped page will cause a deadlock in jfs_get_block.
1221 * The long term solution is to pare down the use of
1222 * IWRITELOCK. We are currently holding it too long.
1223 * We could also be smarter about which data pages need
1224 * to be written before the transaction is committed and
1225 * when we don't need to worry about it at all.
1227 * if ((!S_ISDIR(ip->i_mode))
1228 * && (tblk->flag & COMMIT_DELETE) == 0)
1229 * filemap_write_and_wait(ip->i_mapping);
1233 * Mark inode as not dirty. It will still be on the dirty
1234 * inode list, but we'll know not to commit it again unless
1235 * it gets marked dirty again
1237 clear_cflag(COMMIT_Dirty, ip);
1239 /* inherit anonymous tlock(s) of inode */
1240 if (jfs_ip->atlhead) {
1241 lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1242 tblk->next = jfs_ip->atlhead;
1244 tblk->last = jfs_ip->atltail;
1245 jfs_ip->atlhead = jfs_ip->atltail = 0;
1247 list_del_init(&jfs_ip->anon_inode_list);
1252 * acquire transaction lock on on-disk inode page
1253 * (become first tlock of the tblk's tlock list)
1255 if (((rc = diWrite(tid, ip))))
1260 * write log records from transaction locks
1262 * txUpdateMap() resets XAD_NEW in XAD.
1264 txLog(log, tblk, &cd);
1267 * Ensure that inode isn't reused before
1268 * lazy commit thread finishes processing
1270 if (tblk->xflag & COMMIT_DELETE) {
1273 * Avoid a rare deadlock
1275 * If the inode is locked, we may be blocked in
1276 * jfs_commit_inode. If so, we don't want the
1277 * lazy_commit thread doing the last iput() on the inode
1278 * since that may block on the locked inode. Instead,
1279 * commit the transaction synchronously, so the last iput
1280 * will be done by the calling thread (or later)
1283 * I believe this code is no longer needed. Splitting I_LOCK
1284 * into two bits, I_NEW and I_SYNC should prevent this
1285 * deadlock as well. But since I don't have a JFS testload
1286 * to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
1289 if (tblk->u.ip->i_state & I_SYNC)
1290 tblk->xflag &= ~COMMIT_LAZY;
1293 ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1294 ((tblk->u.ip->i_nlink == 0) &&
1295 !test_cflag(COMMIT_Nolink, tblk->u.ip)));
1298 * write COMMIT log record
1300 lrd->type = cpu_to_le16(LOG_COMMIT);
1302 lmLog(log, tblk, lrd, NULL);
1304 lmGroupCommit(log, tblk);
1307 * - transaction is now committed -
1311 * force pages in careful update
1312 * (imap addressing structure update)
1314 if (flag & COMMIT_FORCE)
1318 * update allocation map.
1320 * update inode allocation map and inode:
1321 * free pager lock on memory object of inode if any.
1322 * update block allocation map.
1324 * txUpdateMap() resets XAD_NEW in XAD.
1326 if (tblk->xflag & COMMIT_FORCE)
1330 * free transaction locks and pageout/free pages
1334 if ((tblk->flag & tblkGC_LAZY) == 0)
1339 * reset in-memory object state
1341 for (k = 0; k < cd.nip; k++) {
1343 jfs_ip = JFS_IP(ip);
1346 * reset in-memory inode state
1357 jfs_info("txCommit: tid = %d, returning %d", tid, rc);
1364 * FUNCTION: Writes AFTER log records for all lines modified
1365 * by tid for segments specified by inodes in comdata.
1366 * Code assumes only WRITELOCKS are recorded in lockwords.
1372 static void txLog(struct jfs_log *log, struct tblock *tblk, struct commit *cd)
1377 struct lrd *lrd = &cd->lrd;
1380 * write log record(s) for each tlock of transaction,
1382 for (lid = tblk->next; lid; lid = tlck->next) {
1383 tlck = lid_to_tlock(lid);
1385 tlck->flag |= tlckLOG;
1387 /* initialize lrd common */
1389 lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
1390 lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
1391 lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
1393 /* write log record of page from the tlock */
1394 switch (tlck->type & tlckTYPE) {
1396 xtLog(log, tblk, lrd, tlck);
1400 dtLog(log, tblk, lrd, tlck);
1404 diLog(log, tblk, lrd, tlck, cd);
1408 mapLog(log, tblk, lrd, tlck);
1412 dataLog(log, tblk, lrd, tlck);
1416 jfs_err("UFO tlock:0x%p", tlck);
1426 * function: log inode tlock and format maplock to update bmap;
1428 static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1429 struct tlock *tlck, struct commit *cd)
1431 struct metapage *mp;
1433 struct pxd_lock *pxdlock;
1437 /* initialize as REDOPAGE record format */
1438 lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
1439 lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
1441 pxd = &lrd->log.redopage.pxd;
1446 if (tlck->type & tlckENTRY) {
1447 /* log after-image for logredo(): */
1448 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1449 PXDaddress(pxd, mp->index);
1451 mp->logical_size >> tblk->sb->s_blocksize_bits);
1452 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1454 /* mark page as homeward bound */
1455 tlck->flag |= tlckWRITEPAGE;
1456 } else if (tlck->type & tlckFREE) {
1460 * (pages of the freed inode extent have been invalidated and
1461 * a maplock for free of the extent has been formatted at
1464 * the tlock had been acquired on the inode allocation map page
1465 * (iag) that specifies the freed extent, even though the map
1466 * page is not itself logged, to prevent pageout of the map
1467 * page before the log;
1470 /* log LOG_NOREDOINOEXT of the freed inode extent for
1471 * logredo() to start NoRedoPage filters, and to update
1472 * imap and bmap for free of the extent;
1474 lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
1476 * For the LOG_NOREDOINOEXT record, we need
1477 * to pass the IAG number and inode extent
1478 * index (within that IAG) from which the
1479 * extent is being released. These have been
1480 * passed to us in the iplist[1] and iplist[2].
1482 lrd->log.noredoinoext.iagnum =
1483 cpu_to_le32((u32) (size_t) cd->iplist[1]);
1484 lrd->log.noredoinoext.inoext_idx =
1485 cpu_to_le32((u32) (size_t) cd->iplist[2]);
1487 pxdlock = (struct pxd_lock *) & tlck->lock;
1488 *pxd = pxdlock->pxd;
1489 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1492 tlck->flag |= tlckUPDATEMAP;
1494 /* mark page as homeward bound */
1495 tlck->flag |= tlckWRITEPAGE;
1497 jfs_err("diLog: UFO type tlck:0x%p", tlck);
1504 * function: log data tlock
1506 static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1509 struct metapage *mp;
1514 /* initialize as REDOPAGE record format */
1515 lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
1516 lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
1518 pxd = &lrd->log.redopage.pxd;
1520 /* log after-image for logredo(): */
1521 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1523 if (jfs_dirtable_inline(tlck->ip)) {
1525 * The table has been truncated; we must have deleted
1526 * the last entry, so don't bother logging this
1530 metapage_homeok(mp);
1531 discard_metapage(mp);
1536 PXDaddress(pxd, mp->index);
1537 PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1539 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1541 /* mark page as homeward bound */
1542 tlck->flag |= tlckWRITEPAGE;
1550 * function: log dtree tlock and format maplock to update bmap;
1552 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1553 struct tlock * tlck)
1555 struct metapage *mp;
1556 struct pxd_lock *pxdlock;
1561 /* initialize as REDOPAGE/NOREDOPAGE record format */
1562 lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
1563 lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
1565 pxd = &lrd->log.redopage.pxd;
1567 if (tlck->type & tlckBTROOT)
1568 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1571 * page extension via relocation: entry insertion;
1572 * page extension in-place: entry insertion;
1573 * new right page from page split, reinitialized in-line
1574 * root from root page split: entry insertion;
1576 if (tlck->type & (tlckNEW | tlckEXTEND)) {
1577 /* log after-image of the new page for logredo():
1578 * mark log (LOG_NEW) for logredo() to initialize
1579 * freelist and update bmap for alloc of the new page;
1581 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1582 if (tlck->type & tlckEXTEND)
1583 lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
1585 lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
1586 PXDaddress(pxd, mp->index);
1588 mp->logical_size >> tblk->sb->s_blocksize_bits);
1589 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1591 /* format a maplock for txUpdateMap() to update bPMAP for
1592 * alloc of the new page;
1594 if (tlck->type & tlckBTROOT)
1596 tlck->flag |= tlckUPDATEMAP;
1597 pxdlock = (struct pxd_lock *) & tlck->lock;
1598 pxdlock->flag = mlckALLOCPXD;
1599 pxdlock->pxd = *pxd;
1603 /* mark page as homeward bound */
1604 tlck->flag |= tlckWRITEPAGE;
1609 * entry insertion/deletion,
1610 * sibling page link update (old right page before split);
1612 if (tlck->type & (tlckENTRY | tlckRELINK)) {
1613 /* log after-image for logredo(): */
1614 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1615 PXDaddress(pxd, mp->index);
1617 mp->logical_size >> tblk->sb->s_blocksize_bits);
1618 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1620 /* mark page as homeward bound */
1621 tlck->flag |= tlckWRITEPAGE;
1626 * page deletion: page has been invalidated
1627 * page relocation: source extent
1629 * a maplock for free of the page has been formatted
1630 * at txLock() time);
1632 if (tlck->type & (tlckFREE | tlckRELOCATE)) {
1633 /* log LOG_NOREDOPAGE of the deleted page for logredo()
1634 * to start NoRedoPage filter and to update bmap for free
1635 * of the deleted page
1637 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1638 pxdlock = (struct pxd_lock *) & tlck->lock;
1639 *pxd = pxdlock->pxd;
1640 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1642 /* a maplock for txUpdateMap() for free of the page
1643 * has been formatted at txLock() time;
1645 tlck->flag |= tlckUPDATEMAP;
1653 * function: log xtree tlock and format maplock to update bmap;
1655 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1656 struct tlock * tlck)
1659 struct metapage *mp;
1661 struct xtlock *xtlck;
1662 struct maplock *maplock;
1663 struct xdlistlock *xadlock;
1664 struct pxd_lock *pxdlock;
1671 /* initialize as REDOPAGE/NOREDOPAGE record format */
1672 lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
1673 lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
1675 page_pxd = &lrd->log.redopage.pxd;
1677 if (tlck->type & tlckBTROOT) {
1678 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1679 p = &JFS_IP(ip)->i_xtroot;
1680 if (S_ISDIR(ip->i_mode))
1681 lrd->log.redopage.type |=
1682 cpu_to_le16(LOG_DIR_XTREE);
1684 p = (xtpage_t *) mp->data;
1685 next = le16_to_cpu(p->header.nextindex);
1687 xtlck = (struct xtlock *) & tlck->lock;
1689 maplock = (struct maplock *) & tlck->lock;
1690 xadlock = (struct xdlistlock *) maplock;
1693 * entry insertion/extension;
1694 * sibling page link update (old right page before split);
1696 if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
1697 /* log after-image for logredo():
1698 * logredo() will update bmap for alloc of new/extended
1699 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1700 * after-image of XADlist;
1701 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1702 * applying the after-image to the meta-data page.
1704 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1705 PXDaddress(page_pxd, mp->index);
1707 mp->logical_size >> tblk->sb->s_blocksize_bits);
1708 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1710 /* format a maplock for txUpdateMap() to update bPMAP
1711 * for alloc of new/extended extents of XAD[lwm:next)
1712 * from the page itself;
1713 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
1715 lwm = xtlck->lwm.offset;
1717 lwm = XTPAGEMAXSLOT;
1722 jfs_err("xtLog: lwm > next");
1725 tlck->flag |= tlckUPDATEMAP;
1726 xadlock->flag = mlckALLOCXADLIST;
1727 xadlock->count = next - lwm;
1728 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1732 * Lazy commit may allow xtree to be modified before
1733 * txUpdateMap runs. Copy xad into linelock to
1734 * preserve correct data.
1736 * We can fit twice as many pxd's as xads in the lock
1738 xadlock->flag = mlckALLOCPXDLIST;
1739 pxd = xadlock->xdlist = &xtlck->pxdlock;
1740 for (i = 0; i < xadlock->count; i++) {
1741 PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
1742 PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
1743 p->xad[lwm + i].flag &=
1744 ~(XAD_NEW | XAD_EXTENDED);
1749 * xdlist will point into the inode's xtree; ensure
1750 * that the transaction is not committed lazily.
1752 xadlock->flag = mlckALLOCXADLIST;
1753 xadlock->xdlist = &p->xad[lwm];
1754 tblk->xflag &= ~COMMIT_LAZY;
1756 jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
1757 tlck->ip, mp, tlck, lwm, xadlock->count);
1762 /* mark page as homeward bound */
1763 tlck->flag |= tlckWRITEPAGE;
1769 * page deletion: file deletion/truncation (ref. xtTruncate())
1771 * (page will be invalidated after log is written and bmap
1772 * is updated from the page);
1774 if (tlck->type & tlckFREE) {
1775 /* LOG_NOREDOPAGE log for NoRedoPage filter:
1776 * if page free from file delete, NoRedoFile filter from
1777 * inode image of zero link count will subsume NoRedoPage
1778 * filters for each page;
1779 * if page free from file truncation, write NoRedoPage
1782 * update of block allocation map for the page itself:
1783 * if page free from deletion and truncation, LOG_UPDATEMAP
1784 * log for the page itself is generated from processing
1785 * its parent page xad entries;
1787 /* if page free from file truncation, log LOG_NOREDOPAGE
1788 * of the deleted page for logredo() to start NoRedoPage
1789 * filter for the page;
1791 if (tblk->xflag & COMMIT_TRUNCATE) {
1792 /* write NOREDOPAGE for the page */
1793 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1794 PXDaddress(page_pxd, mp->index);
1796 mp->logical_size >> tblk->sb->
1799 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1801 if (tlck->type & tlckBTROOT) {
1802 /* Empty xtree must be logged */
1803 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1805 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1809 /* init LOG_UPDATEMAP of the freed extents
1810 * XAD[XTENTRYSTART:hwm) from the deleted page itself
1811 * for logredo() to update bmap;
1813 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1814 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
1815 xtlck = (struct xtlock *) & tlck->lock;
1816 hwm = xtlck->hwm.offset;
1817 lrd->log.updatemap.nxd =
1818 cpu_to_le16(hwm - XTENTRYSTART + 1);
1819 /* reformat linelock for lmLog() */
1820 xtlck->header.offset = XTENTRYSTART;
1821 xtlck->header.length = hwm - XTENTRYSTART + 1;
1823 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1825 /* format a maplock for txUpdateMap() to update bmap
1826 * to free extents of XAD[XTENTRYSTART:hwm) from the
1827 * deleted page itself;
1829 tlck->flag |= tlckUPDATEMAP;
1830 xadlock->count = hwm - XTENTRYSTART + 1;
1831 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1835 * Lazy commit may allow xtree to be modified before
1836 * txUpdateMap runs. Copy xad into linelock to
1837 * preserve correct data.
1839 * We can fit twice as many pxd's as xads in the lock
1841 xadlock->flag = mlckFREEPXDLIST;
1842 pxd = xadlock->xdlist = &xtlck->pxdlock;
1843 for (i = 0; i < xadlock->count; i++) {
1845 addressXAD(&p->xad[XTENTRYSTART + i]));
1847 lengthXAD(&p->xad[XTENTRYSTART + i]));
1852 * xdlist will point into the inode's xtree; ensure
1853 * that the transaction is not committed lazily.
1855 xadlock->flag = mlckFREEXADLIST;
1856 xadlock->xdlist = &p->xad[XTENTRYSTART];
1857 tblk->xflag &= ~COMMIT_LAZY;
1859 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
1860 tlck->ip, mp, xadlock->count);
1864 /* mark page as invalid */
1865 if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1866 && !(tlck->type & tlckBTROOT))
1867 tlck->flag |= tlckFREEPAGE;
1869 /* else (tblk->xflag & COMMIT_PMAP) ? release the page; */
1876 * page/entry truncation: file truncation (ref. xtTruncate())
1878 * |----------+------+------+---------------|
1880 * | | hwm - hwm before truncation
1881 * | next - truncation point
1882 * lwm - lwm before truncation
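	 *
	 * Worked example (hypothetical offsets): with lwm = 3, twm = 5,
	 * next = 6 and hwm = 8, the cases below fire as:
	 *   XAD[3:6)  (lwm < next)    logged and allocated as new extents;
	 *   XAD[5]    (twm == next-1) truncated in place, its delta extent
	 *                             (xtlck->pxdlock) freed from the maps;
	 *   XAD[6:8]  (hwm >= next)   freed entirely.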
1885 if (tlck->type & tlckTRUNCATE) {
1886 pxd_t pxd; /* truncated extent of xad */
1890 * For truncation the entire linelock may be used, so it would
1891 * be difficult to store xad list in linelock itself.
1892 * Therefore, we'll just force transaction to be committed
1893 * synchronously, so that xtree pages won't be changed before
1896 tblk->xflag &= ~COMMIT_LAZY;
1897 lwm = xtlck->lwm.offset;
1899 lwm = XTPAGEMAXSLOT;
1900 hwm = xtlck->hwm.offset;
1901 twm = xtlck->twm.offset;
1906 /* log after-image for logredo():
1908 * logredo() will update bmap for alloc of new/extended
1909 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
1910 * after-image of XADlist;
1911 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
1912 * applying the after-image to the meta-data page.
1914 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1915 PXDaddress(page_pxd, mp->index);
1917 mp->logical_size >> tblk->sb->s_blocksize_bits);
1918 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1921 * truncate entry XAD[twm == next - 1]:
1923 if (twm == next - 1) {
1924 /* init LOG_UPDATEMAP for logredo() to update bmap for
1925 * free of truncated delta extent of the truncated
1926 * entry XAD[next - 1]:
1927 * (xtlck->pxdlock = truncated delta extent);
1929 pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
1930 /* assert(pxdlock->type & tlckTRUNCATE); */
1931 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1932 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
1933 lrd->log.updatemap.nxd = cpu_to_le16(1);
1934 lrd->log.updatemap.pxd = pxdlock->pxd;
1935 pxd = pxdlock->pxd; /* save to format maplock */
1937 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1941 * free entries XAD[next:hwm]:
1944 /* init LOG_UPDATEMAP of the freed extents
1945 * XAD[next:hwm] from the deleted page itself
1946 * for logredo() to update bmap;
1948 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1949 lrd->log.updatemap.type =
1950 cpu_to_le16(LOG_FREEXADLIST);
1951 xtlck = (struct xtlock *) & tlck->lock;
1952 hwm = xtlck->hwm.offset;
1953 lrd->log.updatemap.nxd =
1954 cpu_to_le16(hwm - next + 1);
1955 /* reformat linelock for lmLog() */
1956 xtlck->header.offset = next;
1957 xtlck->header.length = hwm - next + 1;
1960 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1964 * format maplock(s) for txUpdateMap() to update bmap
1969 * allocate entries XAD[lwm:next):
1972 /* format a maplock for txUpdateMap() to update bPMAP
1973 * for alloc of new/extended extents of XAD[lwm:next)
1974 * from the page itself;
1975 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
1977 tlck->flag |= tlckUPDATEMAP;
1978 xadlock->flag = mlckALLOCXADLIST;
1979 xadlock->count = next - lwm;
1980 xadlock->xdlist = &p->xad[lwm];
1982 jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
1983 tlck->ip, mp, xadlock->count, lwm, next);
1989 * truncate entry XAD[twm == next - 1]:
1991 if (twm == next - 1) {
1992 /* format a maplock for txUpdateMap() to update bmap
1993 * to free truncated delta extent of the truncated
1994 * entry XAD[next - 1];
1995 * (xtlck->pxdlock = truncated delta extent);
1997 tlck->flag |= tlckUPDATEMAP;
1998 pxdlock = (struct pxd_lock *) xadlock;
1999 pxdlock->flag = mlckFREEPXD;
2003 jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
2004 ip, mp, pxdlock->count, hwm);
2010 * free entries XAD[next:hwm]:
2013 /* format a maplock for txUpdateMap() to update bmap
2014 * to free extents of XAD[next:hwm] from the deleted
2017 tlck->flag |= tlckUPDATEMAP;
2018 xadlock->flag = mlckFREEXADLIST;
2019 xadlock->count = hwm - next + 1;
2020 xadlock->xdlist = &p->xad[next];
2022 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
2023 tlck->ip, mp, xadlock->count, next, hwm);
2027 /* mark page as homeward bound */
2028 tlck->flag |= tlckWRITEPAGE;
2036 * function: log from maplock of freed data extents;
2038 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2039 struct tlock * tlck)
2041 struct pxd_lock *pxdlock;
2046 * page relocation: free the source page extent
2048 * a maplock for txUpdateMap() for free of the page
2049 * has been formatted at txLock() time saving the src
2050 * relocated page address;
2052 if (tlck->type & tlckRELOCATE) {
2053 /* log LOG_NOREDOPAGE of the old relocated page
2054 * for logredo() to start NoRedoPage filter;
2056 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2057 pxdlock = (struct pxd_lock *) & tlck->lock;
2058 pxd = &lrd->log.redopage.pxd;
2059 *pxd = pxdlock->pxd;
2060 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2062 /* (N.B. currently, logredo() does NOT update bmap
2063 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
2064 * if page free from relocation, LOG_UPDATEMAP log is
2065 * specifically generated now for logredo()
2066 * to update bmap for free of src relocated page;
2067 * (new flag LOG_RELOCATE may be introduced which will
2068 * inform logredo() to start NORedoPage filter and also
2069 * update block allocation map at the same time, thus
2070 * avoiding an extra log write);
2072 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2073 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2074 lrd->log.updatemap.nxd = cpu_to_le16(1);
2075 lrd->log.updatemap.pxd = pxdlock->pxd;
2076 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2078 /* a maplock for txUpdateMap() for free of the page
2079 * has been formatted at txLock() time;
2081 tlck->flag |= tlckUPDATEMAP;
2086 * Otherwise it's not a relocate request
2090 /* log LOG_UPDATEMAP for logredo() to update bmap for
2091 * free of truncated/relocated delta extent of the data;
2092 * e.g.: external EA extent, relocated/truncated extent
2093 * from xtTailgate();
2095 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2096 pxdlock = (struct pxd_lock *) & tlck->lock;
2097 nlock = pxdlock->index;
2098 for (i = 0; i < nlock; i++, pxdlock++) {
2099 if (pxdlock->flag & mlckALLOCPXD)
2100 lrd->log.updatemap.type =
2101 cpu_to_le16(LOG_ALLOCPXD);
2103 lrd->log.updatemap.type =
2104 cpu_to_le16(LOG_FREEPXD);
2105 lrd->log.updatemap.nxd = cpu_to_le16(1);
2106 lrd->log.updatemap.pxd = pxdlock->pxd;
2108 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2109 jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2110 (ulong) addressPXD(&pxdlock->pxd),
2111 lengthPXD(&pxdlock->pxd));
2115 tlck->flag |= tlckUPDATEMAP;
2122 * function: acquire maplock for EA/ACL extents or
2123 * set COMMIT_INLINE flag;
2125 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2127 struct tlock *tlck = NULL;
2128 struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2131 * format maplock for alloc of new EA extent
2134 /* Since the newea could be a completely zeroed entry we need to
2135 * check for the two flags which indicate we should actually
2136 * commit new EA data
2138 if (newea->flag & DXD_EXTENT) {
2139 tlck = txMaplock(tid, ip, tlckMAP);
2140 maplock = (struct pxd_lock *) & tlck->lock;
2141 pxdlock = (struct pxd_lock *) maplock;
2142 pxdlock->flag = mlckALLOCPXD;
2143 PXDaddress(&pxdlock->pxd, addressDXD(newea));
2144 PXDlength(&pxdlock->pxd, lengthDXD(newea));
2147 } else if (newea->flag & DXD_INLINE) {
2150 set_cflag(COMMIT_Inlineea, ip);
2155 * format maplock for free of old EA extent
2157 if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2159 tlck = txMaplock(tid, ip, tlckMAP);
2160 maplock = (struct pxd_lock *) & tlck->lock;
2161 pxdlock = (struct pxd_lock *) maplock;
2164 pxdlock->flag = mlckFREEPXD;
2165 PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2166 PXDlength(&pxdlock->pxd, lengthDXD(oldea));
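/*
 * Usage sketch (compiled out; assumes jfs_ip->ea holds the descriptor
 * being replaced): record an EA change so commit time updates the maps,
 * or merely sets COMMIT_Inlineea for an inline EA.
 */
#if 0
	dxd_t oldea = JFS_IP(ip)->ea;	/* on-disk EA before the change */
	dxd_t newea;			/* filled in by the EA writer */
	/* ... allocate and format the new extent, initialize newea ... */
	txEA(tid, ip, &oldea, &newea);
#endif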
2174 * function: synchronously write pages locked by transaction
2175 * after txLog() but before txUpdateMap();
2177 static void txForce(struct tblock * tblk)
2181 struct metapage *mp;
2184 * reverse the order of transaction tlocks in
2185 * careful update order of address index pages
2186 * (right to left, bottom up)
2188 tlck = lid_to_tlock(tblk->next);
2192 tlck = lid_to_tlock(lid);
2194 tlck->next = tblk->next;
2200 * synchronously write the page, and
2201 * hold the page for txUpdateMap();
2203 for (lid = tblk->next; lid; lid = next) {
2204 tlck = lid_to_tlock(lid);
2207 if ((mp = tlck->mp) != NULL &&
2208 (tlck->type & tlckBTROOT) == 0) {
2209 assert(mp->xflag & COMMIT_PAGE);
2211 if (tlck->flag & tlckWRITEPAGE) {
2212 tlck->flag &= ~tlckWRITEPAGE;
2214 /* do not release page to freelist */
2218 * The "right" thing to do here is to
2219 * synchronously write the metadata.
2220 * With the current implementation this
2221 * is hard since write_metapage requires
2222 * us to kunmap & remap the page. If we
2223 * have tlocks pointing into the metadata
2224 * pages, we don't want to do this. I think
2225 * we can get by with synchronously writing
2226 * the pages when they are released.
2228 assert(mp->nohomeok);
2229 set_bit(META_dirty, &mp->flag);
2230 set_bit(META_sync, &mp->flag);
2240 * function: update persistent allocation map (and working map
2245 static void txUpdateMap(struct tblock * tblk)
2248 struct inode *ipimap;
2251 struct maplock *maplock;
2252 struct pxd_lock pxdlock;
2255 struct metapage *mp = NULL;
2257 ipimap = JFS_SBI(tblk->sb)->ipimap;
2259 maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2263 * update block allocation map
2265 * update allocation state in pmap (and wmap) and
2266 * update lsn of the pmap page;
2269 * scan each tlock/page of transaction for block allocation/free:
2271 * for each tlock/page of transaction, update map.
2272 * ? are there tlock for pmap and pwmap at the same time ?
2274 for (lid = tblk->next; lid; lid = tlck->next) {
2275 tlck = lid_to_tlock(lid);
2277 if ((tlck->flag & tlckUPDATEMAP) == 0)
2280 if (tlck->flag & tlckFREEPAGE) {
2282 * Another thread may attempt to reuse freed space
2283 * immediately, so we want to get rid of the metapage
2284 * before anyone else has a chance to get it.
2285 * Lock metapage, update maps, then invalidate
2289 ASSERT(mp->xflag & COMMIT_PAGE);
2295 * . in-line PXD list:
2296 * . out-of-line XAD list:
2298 maplock = (struct maplock *) & tlck->lock;
2299 nlock = maplock->index;
2301 for (k = 0; k < nlock; k++, maplock++) {
2303 * allocate blocks in persistent map:
2305 * blocks have been allocated from wmap at alloc time;
2307 if (maplock->flag & mlckALLOC) {
2308 txAllocPMap(ipimap, maplock, tblk);
2311 * free blocks in persistent and working map:
2312 * blocks will be freed in pmap and then in wmap;
2314 * ? tblock specifies the PMAP/PWMAP based upon
2317 * free blocks in persistent map:
2318 * blocks will be freed from wmap at last reference
2319 * release of the object for regular files;
2321 * Always free blocks from both persistent & working
2322 * maps for directories
2324 else { /* (maplock->flag & mlckFREE) */
2326 if (tlck->flag & tlckDIRECTORY)
2327 txFreeMap(ipimap, maplock,
2328 tblk, COMMIT_PWMAP);
2330 txFreeMap(ipimap, maplock,
2334 if (tlck->flag & tlckFREEPAGE) {
2335 if (!(tblk->flag & tblkGC_LAZY)) {
2336 /* This is equivalent to txRelease */
2337 ASSERT(mp->lid == lid);
2340 assert(mp->nohomeok == 1);
2341 metapage_homeok(mp);
2342 discard_metapage(mp);
2347 * update inode allocation map
2349 * update allocation state in pmap and
2350 * update lsn of the pmap page;
2351 * update in-memory inode flag/state
2353 * unlock mapper/write lock
2355 if (tblk->xflag & COMMIT_CREATE) {
2356 diUpdatePMap(ipimap, tblk->ino, false, tblk);
2357 /* update persistent block allocation map
2358 * for the allocation of inode extent;
2360 pxdlock.flag = mlckALLOCPXD;
2361 pxdlock.pxd = tblk->u.ixpxd;
2363 txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2364 } else if (tblk->xflag & COMMIT_DELETE) {
2366 diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2374 * function: allocate from persistent map;
2383 * allocate from persistent map;
2384 * free from persistent map;
2385 * (e.g., tmp file - free from working map at release
2386 * of last reference);
2387 * free from persistent and working map;
2389 * lsn - log sequence number;
2391 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2392 struct tblock * tblk)
2394 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2395 struct xdlistlock *xadlistlock;
2399 struct pxd_lock *pxdlock;
2400 struct xdlistlock *pxdlistlock;
2405 * allocate from persistent map;
2407 if (maplock->flag & mlckALLOCXADLIST) {
2408 xadlistlock = (struct xdlistlock *) maplock;
2409 xad = xadlistlock->xdlist;
2410 for (n = 0; n < xadlistlock->count; n++, xad++) {
2411 if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2412 xaddr = addressXAD(xad);
2413 xlen = lengthXAD(xad);
2414 dbUpdatePMap(ipbmap, false, xaddr,
2416 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2417 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2418 (ulong) xaddr, xlen);
2421 } else if (maplock->flag & mlckALLOCPXD) {
2422 pxdlock = (struct pxd_lock *) maplock;
2423 xaddr = addressPXD(&pxdlock->pxd);
2424 xlen = lengthPXD(&pxdlock->pxd);
2425 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2426 jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2427 } else { /* (maplock->flag & mlckALLOCPXDLIST) */
2429 pxdlistlock = (struct xdlistlock *) maplock;
2430 pxd = pxdlistlock->xdlist;
2431 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2432 xaddr = addressPXD(pxd);
2433 xlen = lengthPXD(pxd);
2434 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
2436 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2437 (ulong) xaddr, xlen);
2445 * function: free from persistent and/or working map;
2447 * todo: optimization
2449 void txFreeMap(struct inode *ip,
2450 struct maplock * maplock, struct tblock * tblk, int maptype)
2452 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2453 struct xdlistlock *xadlistlock;
2457 struct pxd_lock *pxdlock;
2458 struct xdlistlock *pxdlistlock;
2462 jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2463 tblk, maplock, maptype);
2466 * free from persistent map;
2468 if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2469 if (maplock->flag & mlckFREEXADLIST) {
2470 xadlistlock = (struct xdlistlock *) maplock;
2471 xad = xadlistlock->xdlist;
2472 for (n = 0; n < xadlistlock->count; n++, xad++) {
2473 if (!(xad->flag & XAD_NEW)) {
2474 xaddr = addressXAD(xad);
2475 xlen = lengthXAD(xad);
2476 dbUpdatePMap(ipbmap, true, xaddr,
2478 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2479 (ulong) xaddr, xlen);
2482 } else if (maplock->flag & mlckFREEPXD) {
2483 pxdlock = (struct pxd_lock *) maplock;
2484 xaddr = addressPXD(&pxdlock->pxd);
2485 xlen = lengthPXD(&pxdlock->pxd);
2486 dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
2488 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2489 (ulong) xaddr, xlen);
2490 } else { /* (maplock->flag & mlckALLOCPXDLIST) */
2492 pxdlistlock = (struct xdlistlock *) maplock;
2493 pxd = pxdlistlock->xdlist;
2494 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2495 xaddr = addressPXD(pxd);
2496 xlen = lengthPXD(pxd);
2497 dbUpdatePMap(ipbmap, true, xaddr,
2499 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2500 (ulong) xaddr, xlen);
2506 * free from working map;
2508 if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2509 if (maplock->flag & mlckFREEXADLIST) {
2510 xadlistlock = (struct xdlistlock *) maplock;
2511 xad = xadlistlock->xdlist;
2512 for (n = 0; n < xadlistlock->count; n++, xad++) {
2513 xaddr = addressXAD(xad);
2514 xlen = lengthXAD(xad);
2515 dbFree(ip, xaddr, (s64) xlen);
2517 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2518 (ulong) xaddr, xlen);
2520 } else if (maplock->flag & mlckFREEPXD) {
2521 pxdlock = (struct pxd_lock *) maplock;
2522 xaddr = addressPXD(&pxdlock->pxd);
2523 xlen = lengthPXD(&pxdlock->pxd);
2524 dbFree(ip, xaddr, (s64) xlen);
2525 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2526 (ulong) xaddr, xlen);
2527 } else { /* (maplock->flag & mlckFREEPXDLIST) */
2529 pxdlistlock = (struct xdlistlock *) maplock;
2530 pxd = pxdlistlock->xdlist;
2531 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2532 xaddr = addressPXD(pxd);
2533 xlen = lengthPXD(pxd);
2534 dbFree(ip, xaddr, (s64) xlen);
2535 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2536 (ulong) xaddr, xlen);
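/*
 * Sketch (compiled out): the maptype argument selects the branches above;
 * a caller frees from both maps at once, or from the persistent map only:
 */
#if 0
	txFreeMap(ipimap, maplock, tblk, COMMIT_PWMAP);	/* pmap, then wmap */
	txFreeMap(ipimap, maplock, tblk, COMMIT_PMAP);	/* pmap only */
#endif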
2545 * function: remove tlock from inode anonymous locklist
2547 void txFreelock(struct inode *ip)
2549 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2550 struct tlock *xtlck, *tlck;
2551 lid_t xlid = 0, lid;
2553 if (!jfs_ip->atlhead)
2557 xtlck = (struct tlock *) &jfs_ip->atlhead;
2559 while ((lid = xtlck->next) != 0) {
2560 tlck = lid_to_tlock(lid);
2561 if (tlck->flag & tlckFREELOCK) {
2562 xtlck->next = tlck->next;
2570 if (jfs_ip->atlhead)
2571 jfs_ip->atltail = xlid;
2573 jfs_ip->atltail = 0;
2575 * If inode was on anon_list, remove it
2577 list_del_init(&jfs_ip->anon_inode_list);
2585 * function: abort tx before commit;
2587 * frees line-locks and segment locks for all
2588 * segments in comdata structure.
2589 * Optionally sets state of file-system to FM_DIRTY in super-block.
2590 * log age of page-frames in memory for which caller has
2591 * are reset to 0 (to avoid logwarap).
2593 void txAbort(tid_t tid, int dirty)
2596 struct metapage *mp;
2597 struct tblock *tblk = tid_to_tblock(tid);
2601 * free tlocks of the transaction
2603 for (lid = tblk->next; lid; lid = next) {
2604 tlck = lid_to_tlock(lid);
2607 JFS_IP(tlck->ip)->xtlid = 0;
2613 * reset lsn of page to avoid logwarap:
2615 * (page may have been previously committed by another
2616 * transaction(s) but has not been paged, i.e.,
2617 * it may be on logsync list even though it has not
2618 * been logged for the current tx.)
2620 if (mp->xflag & COMMIT_PAGE && mp->lsn)
2623 /* insert tlock at head of freelist */
2629 /* caller will free the transaction block */
2631 tblk->next = tblk->last = 0;
2634 * mark filesystem dirty
2637 jfs_error(tblk->sb, "\n");
2643 * txLazyCommit(void)
2645 * All transactions except those changing ipimap (COMMIT_FORCE) are
2646 * processed by this routine. This insures that the inode and block
2647 * allocation maps are updated in order. For synchronous transactions,
2648 * let the user thread finish processing after txUpdateMap() is called.
2650 static void txLazyCommit(struct tblock * tblk)
2652 struct jfs_log *log;
2654 while (((tblk->flag & tblkGC_READY) == 0) &&
2655 ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2656 /* We must have gotten ahead of the user thread
2658 jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2662 jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2666 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2668 spin_lock_irq(&log->gclock); // LOGGC_LOCK
2670 tblk->flag |= tblkGC_COMMITTED;
2672 if (tblk->flag & tblkGC_READY)
2675 wake_up_all(&tblk->gcwait); // LOGGC_WAKEUP
2678 * Can't release log->gclock until we've tested tblk->flag
2680 if (tblk->flag & tblkGC_LAZY) {
2681 spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
2683 tblk->flag &= ~tblkGC_LAZY;
2684 txEnd(tblk - TxBlock); /* Convert back to tid */
2686 spin_unlock_irq(&log->gclock); // LOGGC_UNLOCK
2688 jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2692 * jfs_lazycommit(void)
2694 * To be run as a kernel daemon. If lbmIODone is called in an interrupt
2695 * context, or where blocking is not wanted, this routine will process
2696 * committed transactions from the unlock queue.
2698 int jfs_lazycommit(void *arg)
2701 struct tblock *tblk;
2702 unsigned long flags;
2703 struct jfs_sb_info *sbi;
2707 jfs_commit_thread_waking = 0; /* OK to wake another thread */
2708 while (!list_empty(&TxAnchor.unlock_queue)) {
2710 list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2713 sbi = JFS_SBI(tblk->sb);
2715 * For each volume, the transactions must be
2716 * handled in order. If another commit thread
2717 * is handling a tblk for this superblock,
2720 if (sbi->commit_state & IN_LAZYCOMMIT)
2723 sbi->commit_state |= IN_LAZYCOMMIT;
2727 * Remove transaction from queue
2729 list_del(&tblk->cqueue);
2735 sbi->commit_state &= ~IN_LAZYCOMMIT;
2737 * Don't continue in the for loop. (We can't
2738 * anyway, it's unsafe!) We want to go back to
2739 * the beginning of the list.
2744 /* If there was nothing to do, don't continue */
2748 /* In case a wakeup came while all threads were active */
2749 jfs_commit_thread_waking = 0;
2751 if (freezing(current)) {
2755 DECLARE_WAITQUEUE(wq, current);
2757 add_wait_queue(&jfs_commit_thread_wait, &wq);
2758 set_current_state(TASK_INTERRUPTIBLE);
2761 remove_wait_queue(&jfs_commit_thread_wait, &wq);
2763 } while (!kthread_should_stop());
2765 if (!list_empty(&TxAnchor.unlock_queue))
2766 jfs_err("jfs_lazycommit being killed w/pending transactions!");
2768 jfs_info("jfs_lazycommit being killed");
2772 void txLazyUnlock(struct tblock * tblk)
2774 unsigned long flags;
2778 list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2780 * Don't wake up a commit thread if there is already one servicing
2781 * this superblock, or if the last one we woke up hasn't started yet.
2783 if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2784 !jfs_commit_thread_waking) {
2785 jfs_commit_thread_waking = 1;
2786 wake_up(&jfs_commit_thread_wait);
2791 static void LogSyncRelease(struct metapage * mp)
2793 struct jfs_log *log = mp->log;
2795 assert(mp->nohomeok);
2797 metapage_homeok(mp);
2803 * Block all new transactions and push anonymous transactions to
2806 * This does almost the same thing as jfs_sync below. We don't
2807 * worry about deadlocking when jfs_tlocks_low is set, since we would
2808 * expect jfs_sync to get us out of that jam.
2810 void txQuiesce(struct super_block *sb)
2813 struct jfs_inode_info *jfs_ip;
2814 struct jfs_log *log = JFS_SBI(sb)->log;
2817 set_bit(log_QUIESCE, &log->flag);
2821 while (!list_empty(&TxAnchor.anon_list)) {
2822 jfs_ip = list_entry(TxAnchor.anon_list.next,
2823 struct jfs_inode_info,
2825 ip = &jfs_ip->vfs_inode;
2828 * inode will be removed from anonymous list
2829 * when it is committed
2832 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2833 mutex_lock(&jfs_ip->commit_mutex);
2834 txCommit(tid, 1, &ip, 0);
2836 mutex_unlock(&jfs_ip->commit_mutex);
2838 * Just to be safe. I don't know how
2839 * long we can run without blocking
2846 * If jfs_sync is running in parallel, there could be some inodes
2847 * on anon_list2. Let's check.
2849 if (!list_empty(&TxAnchor.anon_list2)) {
2850 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2856 * We may need to kick off the group commit
2858 jfs_flush_journal(log, 0);
2864 * Allows transactions to start again following txQuiesce
2866 void txResume(struct super_block *sb)
2868 struct jfs_log *log = JFS_SBI(sb)->log;
2870 clear_bit(log_QUIESCE, &log->flag);
2871 TXN_WAKEUP(&log->syncwait);
2877 * To be run as a kernel daemon. This is awakened when tlocks run low.
2878 * We write any inodes that have anonymous tlocks so they will become
2881 int jfs_sync(void *arg)
2884 struct jfs_inode_info *jfs_ip;
2889 * write each inode on the anonymous inode list
2892 while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2893 jfs_ip = list_entry(TxAnchor.anon_list.next,
2894 struct jfs_inode_info,
2896 ip = &jfs_ip->vfs_inode;
2900 * Inode is being freed
2902 list_del_init(&jfs_ip->anon_inode_list);
2903 } else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2905 * inode will be removed from anonymous list
2906 * when it is committed
2909 tid = txBegin(ip->i_sb, COMMIT_INODE);
2910 txCommit(tid, 1, &ip, 0);
2912 mutex_unlock(&jfs_ip->commit_mutex);
2916 * Just to be safe. I don't know how
2917 * long we can run without blocking
2922 /* We can't get the commit mutex. It may
2923 * be held by a thread waiting for tlock's
2924 * so let's not block here. Save it to
2925 * put back on the anon_list.
2928 /* Move from anon_list to anon_list2 */
2929 list_move(&jfs_ip->anon_inode_list,
2930 &TxAnchor.anon_list2);
2937 /* Add anon_list2 back to anon_list */
2938 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2940 if (freezing(current)) {
2944 set_current_state(TASK_INTERRUPTIBLE);
2948 } while (!kthread_should_stop());
2950 jfs_info("jfs_sync being killed");
2954 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
2955 int jfs_txanchor_proc_show(struct seq_file *m, void *v)
2962 waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
2964 waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
2966 waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
2974 "freelockwait = %s\n"
2975 "lowlockwait = %s\n"
2976 "tlocksInUse = %d\n"
2977 "jfs_tlocks_low = %d\n"
2978 "unlock_queue is %sempty\n",
2984 TxAnchor.tlocksInUse,
2986 list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
2991 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
2992 int jfs_txstats_proc_show(struct seq_file *m, void *v)
2997 "calls to txBegin = %d\n"
2998 "txBegin blocked by sync barrier = %d\n"
2999 "txBegin blocked by tlocks low = %d\n"
3000 "txBegin blocked by no free tid = %d\n"
3001 "calls to txBeginAnon = %d\n"
3002 "txBeginAnon blocked by sync barrier = %d\n"
3003 "txBeginAnon blocked by tlocks low = %d\n"
3004 "calls to txLockAlloc = %d\n"
3005 "tLockAlloc blocked by no free lock = %d\n",
3007 TxStat.txBegin_barrier,
3008 TxStat.txBegin_lockslow,
3009 TxStat.txBegin_freetid,
3011 TxStat.txBeginAnon_barrier,
3012 TxStat.txBeginAnon_lockslow,
3014 TxStat.txLockAlloc_freelock);