1 /*
2  * fs/fs-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains all the functions related to writing back and waiting
7  * upon dirty inodes against superblocks, and writing back dirty
8  * pages against inodes.  ie: data writeback.  Writeout of the
9  * inode itself is not handled here.
10  *
11  * 10Apr2002    Andrew Morton
12  *              Split out of fs/inode.c
13  *              Additions for address_space-based writeback
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/writeback.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/buffer_head.h>
29 #include <linux/tracepoint.h>
30 #include "internal.h"
31
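/*
 * Lock ordering used throughout this file:
 *
 *   inode_wb_list_lock
 *     inode->i_lock
 *
 * inode_wb_list_lock protects the per-bdi writeback lists
 * (bdi->wb.b_dirty, b_io, b_more_io) and inode->i_wb_list; inode->i_lock
 * nests inside it.  Paths that start out holding only i_lock (such as
 * __mark_inode_dirty()) drop it before taking inode_wb_list_lock.
 */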
32 /*
33  * Passed into wb_writeback(), essentially a subset of writeback_control
34  */
35 struct wb_writeback_work {
36         long nr_pages;
37         struct super_block *sb;
38         enum writeback_sync_modes sync_mode;
39         unsigned int tagged_writepages:1;
40         unsigned int for_kupdate:1;
41         unsigned int range_cyclic:1;
42         unsigned int for_background:1;
43
44         struct list_head list;          /* pending work list */
45         struct completion *done;        /* set if the caller waits */
46 };
47
48 /*
49  * Include the creation of the trace points after defining the
50  * wb_writeback_work structure so that the definition remains local to this
51  * file.
52  */
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/writeback.h>
55
56 /*
57  * We don't actually have pdflush, but this one is exported through /proc...
58  */
59 int nr_pdflush_threads;
60
61 /**
62  * writeback_in_progress - determine whether there is writeback in progress
63  * @bdi: the device's backing_dev_info structure.
64  *
65  * Determine whether there is writeback waiting to be handled against a
66  * backing device.
67  */
68 int writeback_in_progress(struct backing_dev_info *bdi)
69 {
70         return test_bit(BDI_writeback_running, &bdi->state);
71 }
72
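/*
 * Map an inode to the bdi that should write it back.  Inodes of the
 * block device superblock ("bdev") belong to many different devices, so
 * a single sb->s_bdi cannot describe them all; for those, use the bdi of
 * the inode's own mapping.  Everything else uses the bdi its filesystem
 * registered in sb->s_bdi.
 */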
73 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
74 {
75         struct super_block *sb = inode->i_sb;
76
77         if (strcmp(sb->s_type->name, "bdev") == 0)
78                 return inode->i_mapping->backing_dev_info;
79
80         return sb->s_bdi;
81 }
82
83 static inline struct inode *wb_inode(struct list_head *head)
84 {
85         return list_entry(head, struct inode, i_wb_list);
86 }
87
88 /* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
89 static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
90 {
91         if (bdi->wb.task) {
92                 wake_up_process(bdi->wb.task);
93         } else {
94                 /*
95                  * The bdi thread isn't there, wake up the forker thread which
96                  * will create and run it.
97                  */
98                 wake_up_process(default_backing_dev_info.wb.task);
99         }
100 }
101
102 static void bdi_queue_work(struct backing_dev_info *bdi,
103                            struct wb_writeback_work *work)
104 {
105         trace_writeback_queue(bdi, work);
106
107         spin_lock_bh(&bdi->wb_lock);
108         list_add_tail(&work->list, &bdi->work_list);
109         if (!bdi->wb.task)
110                 trace_writeback_nothread(bdi, work);
111         bdi_wakeup_flusher(bdi);
112         spin_unlock_bh(&bdi->wb_lock);
113 }
114
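/*
 * Queue an asynchronous (WB_SYNC_NONE) request to write @nr_pages pages
 * against @bdi.  The work item is allocated with GFP_ATOMIC because
 * callers such as wakeup_flusher_threads() invoke this under
 * rcu_read_lock() and must not sleep.
 */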
115 static void
116 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
117                       bool range_cyclic)
118 {
119         struct wb_writeback_work *work;
120
121         /*
122          * This is WB_SYNC_NONE writeback, so if allocation fails just
123          * wakeup the thread for old dirty data writeback
124          */
125         work = kzalloc(sizeof(*work), GFP_ATOMIC);
126         if (!work) {
127                 if (bdi->wb.task) {
128                         trace_writeback_nowork(bdi);
129                         wake_up_process(bdi->wb.task);
130                 }
131                 return;
132         }
133
134         work->sync_mode = WB_SYNC_NONE;
135         work->nr_pages  = nr_pages;
136         work->range_cyclic = range_cyclic;
137
138         bdi_queue_work(bdi, work);
139 }
140
141 /**
142  * bdi_start_writeback - start writeback
143  * @bdi: the backing device to write from
144  * @nr_pages: the number of pages to write
145  *
146  * Description:
147  *   This does WB_SYNC_NONE opportunistic writeback. The IO is merely
148  *   queued when this function returns; we make no guarantees on
149  *   completion. Caller need not hold sb s_umount semaphore.
150  *
151  */
152 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
153 {
154         __bdi_start_writeback(bdi, nr_pages, true);
155 }
156
157 /**
158  * bdi_start_background_writeback - start background writeback
159  * @bdi: the backing device to write from
160  *
161  * Description:
162  *   This makes sure WB_SYNC_NONE background writeback happens. When
163  *   this function returns, it is only guaranteed that for the given BDI
164  *   some IO is happening if we are over the background dirty threshold.
165  *   Caller need not hold sb s_umount semaphore.
166  */
167 void bdi_start_background_writeback(struct backing_dev_info *bdi)
168 {
169         /*
170          * We just wake up the flusher thread. It will perform background
171          * writeback as soon as there is no other work to do.
172          */
173         trace_writeback_wake_background(bdi);
174         spin_lock_bh(&bdi->wb_lock);
175         bdi_wakeup_flusher(bdi);
176         spin_unlock_bh(&bdi->wb_lock);
177 }
178
179 /*
180  * Remove the inode from the writeback list it is on.
181  */
182 void inode_wb_list_del(struct inode *inode)
183 {
184         spin_lock(&inode_wb_list_lock);
185         list_del_init(&inode->i_wb_list);
186         spin_unlock(&inode_wb_list_lock);
187 }
188
189
190 /*
191  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
192  * furthest end of its superblock's dirty-inode list.
193  *
194  * Before stamping the inode's ->dirtied_when, we check to see whether it is
195  * already the most-recently-dirtied inode on the b_dirty list.  If that is
196  * the case then the inode must have been redirtied while it was being written
197  * out and we don't reset its dirtied_when.
198  */
199 static void redirty_tail(struct inode *inode)
200 {
201         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
202
203         assert_spin_locked(&inode_wb_list_lock);
204         if (!list_empty(&wb->b_dirty)) {
205                 struct inode *tail;
206
207                 tail = wb_inode(wb->b_dirty.next);
208                 if (time_before(inode->dirtied_when, tail->dirtied_when))
209                         inode->dirtied_when = jiffies;
210         }
211         list_move(&inode->i_wb_list, &wb->b_dirty);
212 }
213
214 /*
215  * requeue inode for re-scanning after bdi->b_io list is exhausted.
216  */
217 static void requeue_io(struct inode *inode)
218 {
219         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
220
221         assert_spin_locked(&inode_wb_list_lock);
222         list_move(&inode->i_wb_list, &wb->b_more_io);
223 }
224
225 static void inode_sync_complete(struct inode *inode)
226 {
227         /*
228          * Prevent speculative execution through
229          * spin_unlock(&inode_wb_list_lock);
230          */
231
232         smp_mb();
233         wake_up_bit(&inode->i_state, __I_SYNC);
234 }
235
236 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
237 {
238         bool ret = time_after(inode->dirtied_when, t);
239 #ifndef CONFIG_64BIT
240         /*
241          * For inodes being constantly redirtied, dirtied_when can get stuck.
242          * It _appears_ to be in the future, but is actually in distant past.
243          * This test is necessary to prevent such wrapped-around relative times
244          * from permanently stopping the whole bdi writeback.
245          */
246         ret = ret && time_before_eq(inode->dirtied_when, jiffies);
247 #endif
248         return ret;
249 }
250
251 /*
252  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
253  */
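/*
 * Inodes are taken from the tail of @delaying_queue (oldest first) until
 * one dirtied after @older_than_this is met.  If the expired inodes span
 * more than one superblock they are regrouped so that inodes of the same
 * superblock end up adjacent on @dispatch_queue; that lets
 * writeback_sb_inodes() drain one superblock at a time instead of
 * bouncing between superblocks.
 */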
254 static void move_expired_inodes(struct list_head *delaying_queue,
255                                struct list_head *dispatch_queue,
256                                 unsigned long *older_than_this)
257 {
258         LIST_HEAD(tmp);
259         struct list_head *pos, *node;
260         struct super_block *sb = NULL;
261         struct inode *inode;
262         int do_sb_sort = 0;
263
264         while (!list_empty(delaying_queue)) {
265                 inode = wb_inode(delaying_queue->prev);
266                 if (older_than_this &&
267                     inode_dirtied_after(inode, *older_than_this))
268                         break;
269                 if (sb && sb != inode->i_sb)
270                         do_sb_sort = 1;
271                 sb = inode->i_sb;
272                 list_move(&inode->i_wb_list, &tmp);
273         }
274
275         /* just one sb in list, splice to dispatch_queue and we're done */
276         if (!do_sb_sort) {
277                 list_splice(&tmp, dispatch_queue);
278                 return;
279         }
280
281         /* Move inodes from one superblock together */
282         while (!list_empty(&tmp)) {
283                 sb = wb_inode(tmp.prev)->i_sb;
284                 list_for_each_prev_safe(pos, node, &tmp) {
285                         inode = wb_inode(pos);
286                         if (inode->i_sb == sb)
287                                 list_move(&inode->i_wb_list, dispatch_queue);
288                 }
289         }
290 }
291
292 /*
293  * Queue all expired dirty inodes for io, eldest first.
294  * Before
295  *         newly dirtied     b_dirty    b_io    b_more_io
296  *         =============>    gf         edc     BA
297  * After
298  *         newly dirtied     b_dirty    b_io    b_more_io
299  *         =============>    g          fBAedc
300  *                                           |
301  *                                           +--> dequeue for IO
302  */
303 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
304 {
305         assert_spin_locked(&inode_wb_list_lock);
306         list_splice_init(&wb->b_more_io, &wb->b_io);
307         move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
308 }
309
310 static int write_inode(struct inode *inode, struct writeback_control *wbc)
311 {
312         if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
313                 return inode->i_sb->s_op->write_inode(inode, wbc);
314         return 0;
315 }
316
317 /*
318  * Wait for writeback on an inode to complete.
319  */
320 static void inode_wait_for_writeback(struct inode *inode)
321 {
322         DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
323         wait_queue_head_t *wqh;
324
325         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
326         while (inode->i_state & I_SYNC) {
327                 spin_unlock(&inode->i_lock);
328                 spin_unlock(&inode_wb_list_lock);
329                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
330                 spin_lock(&inode_wb_list_lock);
331                 spin_lock(&inode->i_lock);
332         }
333 }
334
335 /*
336  * Write out an inode's dirty pages.  Called under inode_wb_list_lock and
337  * inode->i_lock.  Either the caller has an active reference on the inode or
338  * the inode has I_WILL_FREE set.
339  *
340  * If `wait' is set, wait on the writeout.
341  *
342  * The whole writeout design is quite complex and fragile.  We want to avoid
343  * starvation of particular inodes when others are being redirtied, prevent
344  * livelocks, etc.
345  */
346 static int
347 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
348 {
349         struct address_space *mapping = inode->i_mapping;
350         unsigned dirty;
351         int ret;
352
353         assert_spin_locked(&inode_wb_list_lock);
354         assert_spin_locked(&inode->i_lock);
355
356         if (!atomic_read(&inode->i_count))
357                 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
358         else
359                 WARN_ON(inode->i_state & I_WILL_FREE);
360
361         if (inode->i_state & I_SYNC) {
362                 /*
363                  * If this inode is locked for writeback and we are not doing
364                  * writeback-for-data-integrity, move it to b_more_io so that
365                  * writeback can proceed with the other inodes on s_io.
366                  *
367                  * We'll have another go at writing back this inode when we
368                  * completed a full scan of b_io.
369                  */
370                 if (wbc->sync_mode != WB_SYNC_ALL) {
371                         requeue_io(inode);
372                         return 0;
373                 }
374
375                 /*
376                  * It's a data-integrity sync.  We must wait.
377                  */
378                 inode_wait_for_writeback(inode);
379         }
380
381         BUG_ON(inode->i_state & I_SYNC);
382
383         /* Set I_SYNC, reset I_DIRTY_PAGES */
384         inode->i_state |= I_SYNC;
385         inode->i_state &= ~I_DIRTY_PAGES;
386         spin_unlock(&inode->i_lock);
387         spin_unlock(&inode_wb_list_lock);
388
389         ret = do_writepages(mapping, wbc);
390
391         /*
392          * Make sure to wait on the data before writing out the metadata.
393          * This is important for filesystems that modify metadata on data
394          * I/O completion.
395          */
396         if (wbc->sync_mode == WB_SYNC_ALL) {
397                 int err = filemap_fdatawait(mapping);
398                 if (ret == 0)
399                         ret = err;
400         }
401
402         /*
403  * Some filesystems may redirty the inode during writeback due to
404  * delalloc; clear the dirty metadata flags right before
405  * write_inode().
406          */
407         spin_lock(&inode->i_lock);
408         dirty = inode->i_state & I_DIRTY;
409         inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
410         spin_unlock(&inode->i_lock);
411         /* Don't write the inode if only I_DIRTY_PAGES was set */
412         if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
413                 int err = write_inode(inode, wbc);
414                 if (ret == 0)
415                         ret = err;
416         }
417
418         spin_lock(&inode_wb_list_lock);
419         spin_lock(&inode->i_lock);
420         inode->i_state &= ~I_SYNC;
421         if (!(inode->i_state & I_FREEING)) {
422                 /*
423                  * Sync livelock prevention. Each inode is tagged and synced in
424                  * one shot. If still dirty, it will be redirty_tail()'ed below.
425  * Update the dirty time to prevent it from being picked up and synced again.
426                  */
427                 if ((inode->i_state & I_DIRTY) &&
428                     (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
429                         inode->dirtied_when = jiffies;
430
431                 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
432                         /*
433                          * We didn't write back all the pages.  nfs_writepages()
434                          * sometimes bales out without doing anything.
435                          */
436                         inode->i_state |= I_DIRTY_PAGES;
437                         if (wbc->nr_to_write <= 0) {
438                                 /*
439                                  * slice used up: queue for next turn
440                                  */
441                                 requeue_io(inode);
442                         } else {
443                                 /*
444                                  * Writeback blocked by something other than
445                                  * congestion. Delay the inode for some time to
446                                  * avoid spinning on the CPU (100% iowait)
447                                  * retrying writeback of the dirty page/inode
448                                  * that cannot be performed immediately.
449                                  */
450                                 redirty_tail(inode);
451                         }
452                 } else if (inode->i_state & I_DIRTY) {
453                         /*
454                          * Filesystems can dirty the inode during writeback
455                          * operations, such as delayed allocation during
456                          * submission or metadata updates after data IO
457                          * completion.
458                          */
459                         redirty_tail(inode);
460                 } else {
461                         /*
462                          * The inode is clean.  At this point we either have
463  * a reference to the inode or it's on its way out.
464                          * No need to add it back to the LRU.
465                          */
466                         list_del_init(&inode->i_wb_list);
467                 }
468         }
469         inode_sync_complete(inode);
470         return ret;
471 }
472
473 /*
474  * For background writeback the caller does not have the sb pinned
475  * before calling writeback. So make sure that we do pin it, so it doesn't
476  * go away while we are writing inodes from it.
477  */
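/*
 * Pinning happens in two steps: bump sb->s_count under sb_lock so the
 * superblock structure cannot be freed, then try-lock s_umount for read
 * so a concurrent umount cannot tear the filesystem down while its
 * inodes are written.  If the superblock is already on its way out (no
 * instances, no root, or s_umount unavailable) we back out and return
 * false.
 */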
478 static bool pin_sb_for_writeback(struct super_block *sb)
479 {
480         spin_lock(&sb_lock);
481         if (list_empty(&sb->s_instances)) {
482                 spin_unlock(&sb_lock);
483                 return false;
484         }
485
486         sb->s_count++;
487         spin_unlock(&sb_lock);
488
489         if (down_read_trylock(&sb->s_umount)) {
490                 if (sb->s_root)
491                         return true;
492                 up_read(&sb->s_umount);
493         }
494
495         put_super(sb);
496         return false;
497 }
498
499 /*
500  * Write a portion of b_io inodes which belong to @sb.
501  *
502  * If @only_this_sb is true, then find and write all such
503  * inodes. Otherwise write only ones which go sequentially
504  * in reverse order.
505  *
506  * Return 1 if the caller's writeback routine should be
507  * interrupted. Otherwise return 0.
508  */
509 static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
510                 struct writeback_control *wbc, bool only_this_sb)
511 {
512         while (!list_empty(&wb->b_io)) {
513                 long pages_skipped;
514                 struct inode *inode = wb_inode(wb->b_io.prev);
515
516                 if (inode->i_sb != sb) {
517                         if (only_this_sb) {
518                                 /*
519                                  * We only want to write back data for this
520                                  * superblock, move all inodes not belonging
521                                  * to it back onto the dirty list.
522                                  */
523                                 redirty_tail(inode);
524                                 continue;
525                         }
526
527                         /*
528                          * The inode belongs to a different superblock.
529                          * Bounce back to the caller to unpin this and
530                          * pin the next superblock.
531                          */
532                         return 0;
533                 }
534
535                 /*
536  * Don't bother with new inodes or inodes being freed; the first
537  * kind does not need periodic writeout yet, and for the latter
538                  * kind writeout is handled by the freer.
539                  */
540                 spin_lock(&inode->i_lock);
541                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
542                         spin_unlock(&inode->i_lock);
543                         requeue_io(inode);
544                         continue;
545                 }
546
547                 /*
548  * Was this inode dirtied after this writeback pass was started?
549  * This keeps sync from doing extra work and prevents livelock.
550                  */
551                 if (inode_dirtied_after(inode, wbc->wb_start)) {
552                         spin_unlock(&inode->i_lock);
553                         return 1;
554                 }
555
556                 __iget(inode);
557
558                 pages_skipped = wbc->pages_skipped;
559                 writeback_single_inode(inode, wbc);
560                 if (wbc->pages_skipped != pages_skipped) {
561                         /*
562                          * writeback is not making progress due to locked
563                          * buffers.  Skip this inode for now.
564                          */
565                         redirty_tail(inode);
566                 }
567                 spin_unlock(&inode->i_lock);
568                 spin_unlock(&inode_wb_list_lock);
569                 iput(inode);
570                 cond_resched();
571                 spin_lock(&inode_wb_list_lock);
572                 if (wbc->nr_to_write <= 0) {
573                         wbc->more_io = 1;
574                         return 1;
575                 }
576                 if (!list_empty(&wb->b_more_io))
577                         wbc->more_io = 1;
578         }
579         /* b_io is empty */
580         return 1;
581 }
582
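/*
 * Flush a batch of dirty inodes queued on @wb->b_io, regardless of which
 * superblock they belong to.  Each superblock is pinned with
 * pin_sb_for_writeback() before its inodes are written; if the pin fails
 * (the filesystem is being unmounted, for instance) the inode is pushed
 * back to b_more_io and skipped for now.
 */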
583 void writeback_inodes_wb(struct bdi_writeback *wb,
584                 struct writeback_control *wbc)
585 {
586         int ret = 0;
587
588         if (!wbc->wb_start)
589                 wbc->wb_start = jiffies; /* livelock avoidance */
590         spin_lock(&inode_wb_list_lock);
591         if (!wbc->for_kupdate || list_empty(&wb->b_io))
592                 queue_io(wb, wbc->older_than_this);
593
594         while (!list_empty(&wb->b_io)) {
595                 struct inode *inode = wb_inode(wb->b_io.prev);
596                 struct super_block *sb = inode->i_sb;
597
598                 if (!pin_sb_for_writeback(sb)) {
599                         requeue_io(inode);
600                         continue;
601                 }
602                 ret = writeback_sb_inodes(sb, wb, wbc, false);
603                 drop_super(sb);
604
605                 if (ret)
606                         break;
607         }
608         spin_unlock(&inode_wb_list_lock);
609         /* Leave any unwritten inodes on b_io */
610 }
611
612 static void __writeback_inodes_sb(struct super_block *sb,
613                 struct bdi_writeback *wb, struct writeback_control *wbc)
614 {
615         WARN_ON(!rwsem_is_locked(&sb->s_umount));
616
617         spin_lock(&inode_wb_list_lock);
618         if (!wbc->for_kupdate || list_empty(&wb->b_io))
619                 queue_io(wb, wbc->older_than_this);
620         writeback_sb_inodes(sb, wb, wbc, true);
621         spin_unlock(&inode_wb_list_lock);
622 }
623
624 /*
625  * The maximum number of pages to writeout in a single bdi flush/kupdate
626  * operation.  We do this so we don't hold I_SYNC against an inode for
627  * enormous amounts of time, which would block a userspace task which has
628  * been forced to throttle against that inode.  Also, the code reevaluates
629  * the dirty thresholds each time it has written this many pages.
630  */
631 #define MAX_WRITEBACK_PAGES     1024
632
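/*
 * Background writeback keeps running for as long as the global count of
 * dirty plus unstable-NFS pages exceeds the background threshold
 * computed by global_dirty_limits().
 */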
633 static inline bool over_bground_thresh(void)
634 {
635         unsigned long background_thresh, dirty_thresh;
636
637         global_dirty_limits(&background_thresh, &dirty_thresh);
638
639         return (global_page_state(NR_FILE_DIRTY) +
640                 global_page_state(NR_UNSTABLE_NFS) > background_thresh);
641 }
642
643 /*
644  * Explicit flushing or periodic writeback of "old" data.
645  *
646  * Define "old": the first time one of an inode's pages is dirtied, we mark the
647  * dirtying-time in the inode's address_space.  So this periodic writeback code
648  * just walks the superblock inode list, writing back any inodes which are
649  * older than a specific point in time.
650  *
651  * Try to run once per dirty_writeback_interval.  But if a writeback event
652  * takes longer than a dirty_writeback_interval interval, then leave a
653  * one-second gap.
654  *
655  * older_than_this takes precedence over nr_to_write.  So we'll only write back
656  * all dirty pages if they are all attached to "old" mappings.
657  */
658 static long wb_writeback(struct bdi_writeback *wb,
659                          struct wb_writeback_work *work)
660 {
661         struct writeback_control wbc = {
662                 .sync_mode              = work->sync_mode,
663                 .tagged_writepages      = work->tagged_writepages,
664                 .older_than_this        = NULL,
665                 .for_kupdate            = work->for_kupdate,
666                 .for_background         = work->for_background,
667                 .range_cyclic           = work->range_cyclic,
668         };
669         unsigned long oldest_jif;
670         long wrote = 0;
671         long write_chunk = MAX_WRITEBACK_PAGES;
672         struct inode *inode;
673
674         if (wbc.for_kupdate) {
675                 wbc.older_than_this = &oldest_jif;
676                 oldest_jif = jiffies -
677                                 msecs_to_jiffies(dirty_expire_interval * 10);
678         }
679         if (!wbc.range_cyclic) {
680                 wbc.range_start = 0;
681                 wbc.range_end = LLONG_MAX;
682         }
683
684         /*
685          * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
686          * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
687          * here avoids calling into writeback_inodes_wb() more than once.
688          *
689          * The intended call sequence for WB_SYNC_ALL writeback is:
690          *
691          *      wb_writeback()
692          *          __writeback_inodes_sb()     <== called only once
693          *              write_cache_pages()     <== called once for each inode
694          *                   (quickly) tag currently dirty pages
695          *                   (maybe slowly) sync all tagged pages
696          */
697         if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
698                 write_chunk = LONG_MAX;
699
700         wbc.wb_start = jiffies; /* livelock avoidance */
701         for (;;) {
702                 /*
703                  * Stop writeback when nr_pages has been consumed
704                  */
705                 if (work->nr_pages <= 0)
706                         break;
707
708                 /*
709                  * Background writeout and kupdate-style writeback may
710                  * run forever. Stop them if there is other work to do
711                  * so that e.g. sync can proceed. They'll be restarted
712                  * after the other works are all done.
713                  */
714                 if ((work->for_background || work->for_kupdate) &&
715                     !list_empty(&wb->bdi->work_list))
716                         break;
717
718                 /*
719                  * For background writeout, stop when we are below the
720                  * background dirty threshold
721                  */
722                 if (work->for_background && !over_bground_thresh())
723                         break;
724
725                 wbc.more_io = 0;
726                 wbc.nr_to_write = write_chunk;
727                 wbc.pages_skipped = 0;
728
729                 trace_wbc_writeback_start(&wbc, wb->bdi);
730                 if (work->sb)
731                         __writeback_inodes_sb(work->sb, wb, &wbc);
732                 else
733                         writeback_inodes_wb(wb, &wbc);
734                 trace_wbc_writeback_written(&wbc, wb->bdi);
735
736                 work->nr_pages -= write_chunk - wbc.nr_to_write;
737                 wrote += write_chunk - wbc.nr_to_write;
738
739                 /*
740                  * If we consumed everything, see if we have more
741                  */
742                 if (wbc.nr_to_write <= 0)
743                         continue;
744                 /*
745                  * Didn't write everything and we don't have more IO, bail
746                  */
747                 if (!wbc.more_io)
748                         break;
749                 /*
750                  * Did we write something? Try for more
751                  */
752                 if (wbc.nr_to_write < write_chunk)
753                         continue;
754                 /*
755                  * Nothing written. Wait for some inode to
756                  * become available for writeback. Otherwise
757                  * we'll just busyloop.
758                  */
759                 spin_lock(&inode_wb_list_lock);
760                 if (!list_empty(&wb->b_more_io))  {
761                         inode = wb_inode(wb->b_more_io.prev);
762                         trace_wbc_writeback_wait(&wbc, wb->bdi);
763                         spin_lock(&inode->i_lock);
764                         inode_wait_for_writeback(inode);
765                         spin_unlock(&inode->i_lock);
766                 }
767                 spin_unlock(&inode_wb_list_lock);
768         }
769
770         return wrote;
771 }
772
773 /*
774  * Return the next wb_writeback_work struct that hasn't been processed yet.
775  */
776 static struct wb_writeback_work *
777 get_next_work_item(struct backing_dev_info *bdi)
778 {
779         struct wb_writeback_work *work = NULL;
780
781         spin_lock_bh(&bdi->wb_lock);
782         if (!list_empty(&bdi->work_list)) {
783                 work = list_entry(bdi->work_list.next,
784                                   struct wb_writeback_work, list);
785                 list_del_init(&work->list);
786         }
787         spin_unlock_bh(&bdi->wb_lock);
788         return work;
789 }
790
791 /*
792  * Add in the number of potentially dirty inodes, because each inode
793  * write can dirty pagecache in the underlying blockdev.
794  */
795 static unsigned long get_nr_dirty_pages(void)
796 {
797         return global_page_state(NR_FILE_DIRTY) +
798                 global_page_state(NR_UNSTABLE_NFS) +
799                 get_nr_dirty_inodes();
800 }
801
802 static long wb_check_background_flush(struct bdi_writeback *wb)
803 {
804         if (over_bground_thresh()) {
805
806                 struct wb_writeback_work work = {
807                         .nr_pages       = LONG_MAX,
808                         .sync_mode      = WB_SYNC_NONE,
809                         .for_background = 1,
810                         .range_cyclic   = 1,
811                 };
812
813                 return wb_writeback(wb, &work);
814         }
815
816         return 0;
817 }
818
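/*
 * kupdate-style periodic writeback: roughly every dirty_writeback_interval,
 * flush inodes whose data has been dirty for longer than
 * dirty_expire_interval.  Both sysctls are expressed in centiseconds,
 * hence the "* 10" conversions to milliseconds here and in wb_writeback().
 */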
819 static long wb_check_old_data_flush(struct bdi_writeback *wb)
820 {
821         unsigned long expired;
822         long nr_pages;
823
824         /*
825          * When set to zero, disable periodic writeback
826          */
827         if (!dirty_writeback_interval)
828                 return 0;
829
830         expired = wb->last_old_flush +
831                         msecs_to_jiffies(dirty_writeback_interval * 10);
832         if (time_before(jiffies, expired))
833                 return 0;
834
835         wb->last_old_flush = jiffies;
836         nr_pages = get_nr_dirty_pages();
837
838         if (nr_pages) {
839                 struct wb_writeback_work work = {
840                         .nr_pages       = nr_pages,
841                         .sync_mode      = WB_SYNC_NONE,
842                         .for_kupdate    = 1,
843                         .range_cyclic   = 1,
844                 };
845
846                 return wb_writeback(wb, &work);
847         }
848
849         return 0;
850 }
851
852 /*
853  * Retrieve work items and do the writeback they describe
854  */
855 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
856 {
857         struct backing_dev_info *bdi = wb->bdi;
858         struct wb_writeback_work *work;
859         long wrote = 0;
860
861         set_bit(BDI_writeback_running, &wb->bdi->state);
862         while ((work = get_next_work_item(bdi)) != NULL) {
863                 /*
864                  * Override sync mode, in case we must wait for completion
865                  * because this thread is exiting now.
866                  */
867                 if (force_wait)
868                         work->sync_mode = WB_SYNC_ALL;
869
870                 trace_writeback_exec(bdi, work);
871
872                 wrote += wb_writeback(wb, work);
873
874                 /*
875                  * Notify the caller of completion if this is a synchronous
876                  * work item, otherwise just free it.
877                  */
878                 if (work->done)
879                         complete(work->done);
880                 else
881                         kfree(work);
882         }
883
884         /*
885          * Check for periodic writeback, kupdated() style
886          */
887         wrote += wb_check_old_data_flush(wb);
888         wrote += wb_check_background_flush(wb);
889         clear_bit(BDI_writeback_running, &wb->bdi->state);
890
891         return wrote;
892 }
893
894 /*
895  * Handle writeback of dirty data for the device backed by this bdi. Also
896  * wakes up periodically and does kupdated style flushing.
897  */
898 int bdi_writeback_thread(void *data)
899 {
900         struct bdi_writeback *wb = data;
901         struct backing_dev_info *bdi = wb->bdi;
902         long pages_written;
903
904         current->flags |= PF_SWAPWRITE;
905         set_freezable();
906         wb->last_active = jiffies;
907
908         /*
909          * Our parent may run at a different priority, just set us to normal
910          */
911         set_user_nice(current, 0);
912
913         trace_writeback_thread_start(bdi);
914
915         while (!kthread_should_stop()) {
916                 /*
917                  * Remove own delayed wake-up timer, since we are already awake
918  * and we'll take care of the periodic write-back.
919                  */
920                 del_timer(&wb->wakeup_timer);
921
922                 pages_written = wb_do_writeback(wb, 0);
923
924                 trace_writeback_pages_written(pages_written);
925
926                 if (pages_written)
927                         wb->last_active = jiffies;
928
929                 set_current_state(TASK_INTERRUPTIBLE);
930                 if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
931                         __set_current_state(TASK_RUNNING);
932                         continue;
933                 }
934
935                 if (wb_has_dirty_io(wb) && dirty_writeback_interval)
936                         schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
937                 else {
938                         /*
939                          * We have nothing to do, so can go sleep without any
940                          * timeout and save power. When a work is queued or
941                          * something is made dirty - we will be woken up.
942                          */
943                         schedule();
944                 }
945
946                 try_to_freeze();
947         }
948
949         /* Flush any work that raced with us exiting */
950         if (!list_empty(&bdi->work_list))
951                 wb_do_writeback(wb, 1);
952
953         trace_writeback_thread_stop(bdi);
954         return 0;
955 }
956
957
958 /*
959  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
960  * the whole world.
961  */
962 void wakeup_flusher_threads(long nr_pages)
963 {
964         struct backing_dev_info *bdi;
965
966         if (!nr_pages) {
967                 nr_pages = global_page_state(NR_FILE_DIRTY) +
968                                 global_page_state(NR_UNSTABLE_NFS);
969         }
970
971         rcu_read_lock();
972         list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
973                 if (!bdi_has_dirty_io(bdi))
974                         continue;
975                 __bdi_start_writeback(bdi, nr_pages, false);
976         }
977         rcu_read_unlock();
978 }
979
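/*
 * block_dump support: when the block_dump sysctl is raised above 1 (see
 * __mark_inode_dirty()), log which task dirtied which inode so sources
 * of disk activity can be traced, e.g. for laptop-mode power debugging.
 * The anonymous inode 0 of the "bdev" pseudo filesystem is not reported.
 */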
980 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
981 {
982         if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
983                 struct dentry *dentry;
984                 const char *name = "?";
985
986                 dentry = d_find_alias(inode);
987                 if (dentry) {
988                         spin_lock(&dentry->d_lock);
989                         name = (const char *) dentry->d_name.name;
990                 }
991                 printk(KERN_DEBUG
992                        "%s(%d): dirtied inode %lu (%s) on %s\n",
993                        current->comm, task_pid_nr(current), inode->i_ino,
994                        name, inode->i_sb->s_id);
995                 if (dentry) {
996                         spin_unlock(&dentry->d_lock);
997                         dput(dentry);
998                 }
999         }
1000 }
1001
1002 /**
1003  *      __mark_inode_dirty -    internal function
1004  *      @inode: inode to mark
1005  *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
1006  *      Mark an inode as dirty. Callers should use mark_inode_dirty or
1007  *      mark_inode_dirty_sync.
1008  *
1009  * Put the inode on the super block's dirty list.
1010  *
1011  * CAREFUL! We mark it dirty unconditionally, but move it onto the
1012  * dirty list only if it is hashed or if it refers to a blockdev.
1013  * If it was not hashed, it will never be added to the dirty list
1014  * even if it is later hashed, as it will have been marked dirty already.
1015  *
1016  * In short, make sure you hash any inodes _before_ you start marking
1017  * them dirty.
1018  *
1019  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
1020  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
1021  * the kernel-internal blockdev inode represents the dirtying time of the
1022  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
1023  * page->mapping->host, so the page-dirtying time is recorded in the internal
1024  * blockdev inode.
1025  */
1026 void __mark_inode_dirty(struct inode *inode, int flags)
1027 {
1028         struct super_block *sb = inode->i_sb;
1029         struct backing_dev_info *bdi = NULL;
1030
1031         /*
1032          * Don't do this for I_DIRTY_PAGES - that doesn't actually
1033          * dirty the inode itself
1034          */
1035         if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
1036                 if (sb->s_op->dirty_inode)
1037                         sb->s_op->dirty_inode(inode, flags);
1038         }
1039
1040         /*
1041          * make sure that changes are seen by all cpus before we test i_state
1042          * -- mikulas
1043          */
1044         smp_mb();
1045
1046         /* avoid the locking if we can */
1047         if ((inode->i_state & flags) == flags)
1048                 return;
1049
1050         if (unlikely(block_dump > 1))
1051                 block_dump___mark_inode_dirty(inode);
1052
1053         spin_lock(&inode->i_lock);
1054         if ((inode->i_state & flags) != flags) {
1055                 const int was_dirty = inode->i_state & I_DIRTY;
1056
1057                 inode->i_state |= flags;
1058
1059                 /*
1060                  * If the inode is being synced, just update its dirty state.
1061                  * The unlocker will place the inode on the appropriate
1062                  * superblock list, based upon its state.
1063                  */
1064                 if (inode->i_state & I_SYNC)
1065                         goto out_unlock_inode;
1066
1067                 /*
1068                  * Only add valid (hashed) inodes to the superblock's
1069                  * dirty list.  Add blockdev inodes as well.
1070                  */
1071                 if (!S_ISBLK(inode->i_mode)) {
1072                         if (inode_unhashed(inode))
1073                                 goto out_unlock_inode;
1074                 }
1075                 if (inode->i_state & I_FREEING)
1076                         goto out_unlock_inode;
1077
1078                 /*
1079                  * If the inode was already on b_dirty/b_io/b_more_io, don't
1080                  * reposition it (that would break b_dirty time-ordering).
1081                  */
1082                 if (!was_dirty) {
1083                         bool wakeup_bdi = false;
1084                         bdi = inode_to_bdi(inode);
1085
1086                         if (bdi_cap_writeback_dirty(bdi)) {
1087                                 WARN(!test_bit(BDI_registered, &bdi->state),
1088                                      "bdi-%s not registered\n", bdi->name);
1089
1090                                 /*
1091                                  * If this is the first dirty inode for this
1092                                  * bdi, we have to wake-up the corresponding
1093                                  * bdi thread to make sure background
1094                                  * write-back happens later.
1095                                  */
1096                                 if (!wb_has_dirty_io(&bdi->wb))
1097                                         wakeup_bdi = true;
1098                         }
1099
1100                         spin_unlock(&inode->i_lock);
1101                         spin_lock(&inode_wb_list_lock);
1102                         inode->dirtied_when = jiffies;
1103                         list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
1104                         spin_unlock(&inode_wb_list_lock);
1105
1106                         if (wakeup_bdi)
1107                                 bdi_wakeup_thread_delayed(bdi);
1108                         return;
1109                 }
1110         }
1111 out_unlock_inode:
1112         spin_unlock(&inode->i_lock);
1113
1114 }
1115 EXPORT_SYMBOL(__mark_inode_dirty);
1116
1117 /*
1118  * Write out a superblock's list of dirty inodes.  A wait will be performed
1119  * upon no inodes, all inodes or the final one, depending upon sync_mode.
1120  *
1121  * If older_than_this is non-NULL, then only write out inodes which
1122  * had their first dirtying at a time earlier than *older_than_this.
1123  *
1124  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1125  * This function assumes that the blockdev superblock's inodes are backed by
1126  * a variety of queues, so all inodes are searched.  For other superblocks,
1127  * assume that all inodes are backed by the same queue.
1128  *
1129  * The inodes to be written are parked on bdi->b_io.  They are moved back onto
1130  * bdi->b_dirty as they are selected for writing.  This way, none can be missed
1131  * on the writer throttling path, and we get decent balancing between many
1132  * throttled threads: we don't want them all piling up on inode_sync_wait.
1133  */
1134 static void wait_sb_inodes(struct super_block *sb)
1135 {
1136         struct inode *inode, *old_inode = NULL;
1137
1138         /*
1139          * We need to be protected against the filesystem going from
1140          * r/o to r/w or vice versa.
1141          */
1142         WARN_ON(!rwsem_is_locked(&sb->s_umount));
1143
1144         spin_lock(&inode_sb_list_lock);
1145
1146         /*
1147          * Data integrity sync. Must wait for all pages under writeback,
1148          * because there may have been pages dirtied before our sync
1149          * call, but which had writeout started before we write it out.
1150          * In which case, the inode may not be on the dirty list, but
1151          * we still have to wait for that writeout.
1152          */
1153         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1154                 struct address_space *mapping = inode->i_mapping;
1155
1156                 spin_lock(&inode->i_lock);
1157                 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
1158                     (mapping->nrpages == 0)) {
1159                         spin_unlock(&inode->i_lock);
1160                         continue;
1161                 }
1162                 __iget(inode);
1163                 spin_unlock(&inode->i_lock);
1164                 spin_unlock(&inode_sb_list_lock);
1165
1166                 /*
1167                  * We hold a reference to 'inode' so it couldn't have been
1168                  * removed from s_inodes list while we dropped the
1169                  * inode_sb_list_lock.  We cannot iput the inode now as we can
1170                  * be holding the last reference and we cannot iput it under
1171                  * inode_sb_list_lock. So we keep the reference and iput it
1172                  * later.
1173                  */
1174                 iput(old_inode);
1175                 old_inode = inode;
1176
1177                 filemap_fdatawait(mapping);
1178
1179                 cond_resched();
1180
1181                 spin_lock(&inode_sb_list_lock);
1182         }
1183         spin_unlock(&inode_sb_list_lock);
1184         iput(old_inode);
1185 }
1186
1187 /**
1188  * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
1189  * @sb: the superblock
1190  * @nr: the number of pages to write
1191  *
1192  * Start writeback on some inodes on this super_block. No guarantees are made
1193  * on how many (if any) will be written, and this function does not wait
1194  * for IO completion of submitted IO.
1195  */
1196 void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
1197 {
1198         DECLARE_COMPLETION_ONSTACK(done);
1199         struct wb_writeback_work work = {
1200                 .sb                     = sb,
1201                 .sync_mode              = WB_SYNC_NONE,
1202                 .tagged_writepages      = 1,
1203                 .done                   = &done,
1204                 .nr_pages               = nr,
1205         };
1206
1207         WARN_ON(!rwsem_is_locked(&sb->s_umount));
1208         bdi_queue_work(sb->s_bdi, &work);
1209         wait_for_completion(&done);
1210 }
1211 EXPORT_SYMBOL(writeback_inodes_sb_nr);
1212
1213 /**
1214  * writeback_inodes_sb  -       writeback dirty inodes from given super_block
1215  * @sb: the superblock
1216  *
1217  * Start writeback on some inodes on this super_block. No guarantees are made
1218  * on how many (if any) will be written, and this function does not wait
1219  * for IO completion of submitted IO.
1220  */
1221 void writeback_inodes_sb(struct super_block *sb)
1222 {
1223         return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
1224 }
1225 EXPORT_SYMBOL(writeback_inodes_sb);
1226
1227 /**
1228  * writeback_inodes_sb_if_idle  -       start writeback if none underway
1229  * @sb: the superblock
1230  *
1231  * Invoke writeback_inodes_sb if no writeback is currently underway.
1232  * Returns 1 if writeback was started, 0 if not.
1233  */
1234 int writeback_inodes_sb_if_idle(struct super_block *sb)
1235 {
1236         if (!writeback_in_progress(sb->s_bdi)) {
1237                 down_read(&sb->s_umount);
1238                 writeback_inodes_sb(sb);
1239                 up_read(&sb->s_umount);
1240                 return 1;
1241         } else
1242                 return 0;
1243 }
1244 EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
1245
1246 /**
1247  * writeback_inodes_sb_nr_if_idle -     start writeback if none underway
1248  * @sb: the superblock
1249  * @nr: the number of pages to write
1250  *
1251  * Invoke writeback_inodes_sb if no writeback is currently underway.
1252  * Returns 1 if writeback was started, 0 if not.
1253  */
1254 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
1255                                    unsigned long nr)
1256 {
1257         if (!writeback_in_progress(sb->s_bdi)) {
1258                 down_read(&sb->s_umount);
1259                 writeback_inodes_sb_nr(sb, nr);
1260                 up_read(&sb->s_umount);
1261                 return 1;
1262         } else
1263                 return 0;
1264 }
1265 EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
1266
1267 /**
1268  * sync_inodes_sb       -       sync sb inode pages
1269  * @sb: the superblock
1270  *
1271  * This function writes and waits on any dirty inode belonging to this
1272  * super_block.
1273  */
1274 void sync_inodes_sb(struct super_block *sb)
1275 {
1276         DECLARE_COMPLETION_ONSTACK(done);
1277         struct wb_writeback_work work = {
1278                 .sb             = sb,
1279                 .sync_mode      = WB_SYNC_ALL,
1280                 .nr_pages       = LONG_MAX,
1281                 .range_cyclic   = 0,
1282                 .done           = &done,
1283         };
1284
1285         WARN_ON(!rwsem_is_locked(&sb->s_umount));
1286
1287         bdi_queue_work(sb->s_bdi, &work);
1288         wait_for_completion(&done);
1289
1290         wait_sb_inodes(sb);
1291 }
1292 EXPORT_SYMBOL(sync_inodes_sb);
1293
1294 /**
1295  * write_inode_now      -       write an inode to disk
1296  * @inode: inode to write to disk
1297  * @sync: whether the write should be synchronous or not
1298  *
1299  * This function commits an inode to disk immediately if it is dirty. This is
1300  * primarily needed by knfsd.
1301  *
1302  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1303  */
1304 int write_inode_now(struct inode *inode, int sync)
1305 {
1306         int ret;
1307         struct writeback_control wbc = {
1308                 .nr_to_write = LONG_MAX,
1309                 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1310                 .range_start = 0,
1311                 .range_end = LLONG_MAX,
1312         };
1313
1314         if (!mapping_cap_writeback_dirty(inode->i_mapping))
1315                 wbc.nr_to_write = 0;
1316
1317         might_sleep();
1318         spin_lock(&inode_wb_list_lock);
1319         spin_lock(&inode->i_lock);
1320         ret = writeback_single_inode(inode, &wbc);
1321         spin_unlock(&inode->i_lock);
1322         spin_unlock(&inode_wb_list_lock);
1323         if (sync)
1324                 inode_sync_wait(inode);
1325         return ret;
1326 }
1327 EXPORT_SYMBOL(write_inode_now);
1328
1329 /**
1330  * sync_inode - write an inode and its pages to disk.
1331  * @inode: the inode to sync
1332  * @wbc: controls the writeback mode
1333  *
1334  * sync_inode() will write an inode and its pages to disk.  It will also
1335  * correctly update the inode on its superblock's dirty inode lists and will
1336  * update inode->i_state.
1337  *
1338  * The caller must have a ref on the inode.
1339  */
1340 int sync_inode(struct inode *inode, struct writeback_control *wbc)
1341 {
1342         int ret;
1343
1344         spin_lock(&inode_wb_list_lock);
1345         spin_lock(&inode->i_lock);
1346         ret = writeback_single_inode(inode, wbc);
1347         spin_unlock(&inode->i_lock);
1348         spin_unlock(&inode_wb_list_lock);
1349         return ret;
1350 }
1351 EXPORT_SYMBOL(sync_inode);
1352
1353 /**
1354  * sync_inode_metadata - write an inode to disk
1355  * @inode: the inode to sync
1356  * @wait: wait for I/O to complete.
1357  *
1358  * Write an inode to disk and adjust its dirty state after completion.
1359  *
1360  * Note: only writes the actual inode, no associated data or other metadata.
1361  */
1362 int sync_inode_metadata(struct inode *inode, int wait)
1363 {
1364         struct writeback_control wbc = {
1365                 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1366                 .nr_to_write = 0, /* metadata-only */
1367         };
1368
1369         return sync_inode(inode, &wbc);
1370 }
1371 EXPORT_SYMBOL(sync_inode_metadata);