/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_CACHE_SHIFT - 10))
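/*
 * For illustration: 4096UL is the chunk size in KB (i.e. 4MB) and the
 * (PAGE_CACHE_SHIFT - 10) shift converts KB to pages, so with 4KB
 * pages (PAGE_CACHE_SHIFT == 12) this evaluates to 1024 pages.
 */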

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
        long nr_pages;
        struct super_block *sb;
        /*
         * Write only inodes dirtied before this time. Don't forget to set
         * older_than_this_is_set when you set this.
         */
        unsigned long older_than_this;
        enum writeback_sync_modes sync_mode;
        unsigned int tagged_writepages:1;
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;
        unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
        unsigned int older_than_this_is_set:1;
        enum wb_reason reason;          /* why was writeback initiated? */

        struct list_head list;          /* pending work list */
        struct completion *done;        /* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);
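/*
 * Example (illustrative only, not taken from this file): a caller that
 * wants to avoid queueing redundant work could test the flag first:
 *
 *	if (!writeback_in_progress(sb->s_bdi))
 *		writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 *
 * The answer is only a snapshot; writeback may start or finish right
 * after the test, so callers must tolerate both outcomes.
 */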

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (sb_is_blkdev_sb(sb))
                return inode->i_mapping->backing_dev_info;

        return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
        return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

static void bdi_queue_work(struct backing_dev_info *bdi,
                           struct wb_writeback_work *work)
{
        trace_writeback_queue(bdi, work);

        spin_lock_bh(&bdi->wb_lock);
        list_add_tail(&work->list, &bdi->work_list);
        spin_unlock_bh(&bdi->wb_lock);

        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                      bool range_cyclic, enum wb_reason reason)
{
        struct wb_writeback_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wakeup the thread for old dirty data writeback
         */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                trace_writeback_nowork(bdi);
                mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
                return;
        }

        work->sync_mode = WB_SYNC_NONE;
        work->nr_pages  = nr_pages;
        work->range_cyclic = range_cyclic;
        work->reason    = reason;

        bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. The caller need not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                        enum wb_reason reason)
{
        __bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
        /*
         * We just wake up the flusher thread. It will perform background
         * writeback as soon as there is no other work to do.
         */
        trace_writeback_wake_background(bdi);
        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        spin_lock(&bdi->wb.list_lock);
        list_del_init(&inode->i_wb_list);
        spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = wb_inode(wb->b_dirty.next);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        inode->i_state &= ~I_SYNC;
        /* If the inode is clean and unused, put it into the LRU now... */
        inode_add_lru(inode);
        /* Waiters must see I_SYNC cleared before being woken up */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole bdi writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}
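/*
 * For illustration of the wraparound case above: on 32-bit with
 * HZ=1000, jiffies wraps about every 50 days, and time_after() treats
 * anything more than ~25 days in the past as being "in the future".
 * A constantly redirtied inode can therefore look dirtied-after-t even
 * though it is ancient, which the time_before_eq() check filters out.
 */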

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
                               struct list_head *dispatch_queue,
                               struct wb_writeback_work *work)
{
        LIST_HEAD(tmp);
        struct list_head *pos, *node;
        struct super_block *sb = NULL;
        struct inode *inode;
        int do_sb_sort = 0;
        int moved = 0;

        WARN_ON_ONCE(!work->older_than_this_is_set);
        while (!list_empty(delaying_queue)) {
                inode = wb_inode(delaying_queue->prev);
                if (inode_dirtied_after(inode, work->older_than_this))
                        break;
                list_move(&inode->i_wb_list, &tmp);
                moved++;
                if (sb_is_blkdev_sb(inode->i_sb))
                        continue;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
        }

        /* just one sb in list, splice to dispatch_queue and we're done */
        if (!do_sb_sort) {
                list_splice(&tmp, dispatch_queue);
                goto out;
        }

        /* Move inodes from one superblock together */
        while (!list_empty(&tmp)) {
                sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
                        inode = wb_inode(pos);
                        if (inode->i_sb == sb)
                                list_move(&inode->i_wb_list, dispatch_queue);
                }
        }
out:
        return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
        int moved;
        assert_spin_locked(&wb->list_lock);
        list_splice_init(&wb->b_more_io, &wb->b_io);
        moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
        trace_writeback_queue_io(wb, work, moved);
}
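/*
 * A worked example of the diagram above: b_io already holds {e, d, c};
 * the b_more_io entries {B, A} are spliced in front of them, and then
 * move_expired_inodes() prepends the newly expired {f} from b_dirty,
 * giving b_io = fBAedc.  Inodes are dequeued for IO from the tail, so
 * the eldest go first.
 */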

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
                trace_writeback_write_inode_start(inode, wbc);
                ret = inode->i_sb->s_op->write_inode(inode, wbc);
                trace_writeback_write_inode(inode, wbc);
                return ret;
        }
        return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
        __releases(inode->i_lock)
        __acquires(inode->i_lock)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode->i_lock);
        }
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        __inode_wait_for_writeback(inode);
        spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
        __releases(inode->i_lock)
{
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        int sleep;

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        sleep = inode->i_state & I_SYNC;
        spin_unlock(&inode->i_lock);
        if (sleep)
                schedule();
        finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one
 * else processes all inodes in the writeback lists, and requeueing inodes
 * behind the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                          struct writeback_control *wbc)
{
        if (inode->i_state & I_FREEING)
                return;

        /*
         * Sync livelock prevention. Each inode is tagged and synced in one
         * shot. If still dirty, it will be redirty_tail()'ed below.  Update
         * the dirty time to prevent enqueue and sync it again.
         */
        if ((inode->i_state & I_DIRTY) &&
            (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
                inode->dirtied_when = jiffies;

        if (wbc->pages_skipped) {
                /*
                 * writeback is not making progress due to locked
                 * buffers. Skip this inode for now.
                 */
                redirty_tail(inode, wb);
                return;
        }

        if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                /*
                 * We didn't write back all the pages.  nfs_writepages()
                 * sometimes bails out without doing anything.
                 */
                if (wbc->nr_to_write <= 0) {
                        /* Slice used up. Queue for next turn. */
                        requeue_io(inode, wb);
                } else {
                        /*
                         * Writeback blocked by something other than
                         * congestion. Delay the inode for some time to
                         * avoid spinning on the CPU (100% iowait)
                         * retrying writeback of the dirty page/inode
                         * that cannot be performed immediately.
                         */
                        redirty_tail(inode, wb);
                }
        } else if (inode->i_state & I_DIRTY) {
                /*
                 * Filesystems can dirty the inode during writeback operations,
                 * such as delayed allocation during submission or metadata
                 * updates after data IO completion.
                 */
                redirty_tail(inode, wb);
        } else {
                /* The inode is clean. Remove from writeback lists. */
                list_del_init(&inode->i_wb_list);
        }
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        long nr_to_write = wbc->nr_to_write;
        unsigned dirty;
        int ret;

        WARN_ON(!(inode->i_state & I_SYNC));

        trace_writeback_single_inode_start(inode, wbc, nr_to_write);

        ret = do_writepages(mapping, wbc);

        /*
         * Make sure to wait on the data before writing out the metadata.
         * This is important for filesystems that modify metadata on data
         * I/O completion. We don't do it for sync(2) writeback because it has a
         * separate, external IO completion path and ->sync_fs for guaranteeing
         * inode metadata is written back correctly.
         */
        if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        /*
         * Some filesystems may redirty the inode during the writeback
         * due to delalloc, clear dirty metadata flags right before
         * write_inode()
         */
        spin_lock(&inode->i_lock);
        /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                inode->i_state &= ~I_DIRTY_PAGES;
        dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        spin_unlock(&inode->i_lock);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
                if (ret == 0)
                        ret = err;
        }
        trace_writeback_single_inode(inode, wbc, nr_to_write);
        return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back a single inode, e.g. when
 * called from filesystem code. The flusher thread instead uses
 * __writeback_single_inode() and does more thorough writeback list
 * handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                       struct writeback_control *wbc)
{
        int ret = 0;

        spin_lock(&inode->i_lock);
        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out;
                /*
                 * It's a data-integrity sync. We must wait. Since callers hold
                 * inode reference or inode has I_WILL_FREE set, it cannot go
                 * away under us.
                 */
                __inode_wait_for_writeback(inode);
        }
        WARN_ON(inode->i_state & I_SYNC);
        /*
         * Skip inode if it is clean. We don't want to mess with writeback
         * lists in this function since flusher thread may be doing for example
         * sync in parallel and if we move the inode, it could get skipped. So
         * here we make sure inode is on some writeback list and leave it there
         * unless we have completely cleaned the inode.
         */
        if (!(inode->i_state & I_DIRTY))
                goto out;
        inode->i_state |= I_SYNC;
        spin_unlock(&inode->i_lock);

        ret = __writeback_single_inode(inode, wbc);

        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        /*
         * If inode is clean, remove it from writeback lists. Otherwise don't
         * touch it. See comment above for explanation.
         */
        if (!(inode->i_state & I_DIRTY))
                list_del_init(&inode->i_wb_list);
        spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
out:
        spin_unlock(&inode->i_lock);
        return ret;
}
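/*
 * Locking note (restating the code above, not a new rule): when both
 * locks are needed, wb->list_lock is taken before inode->i_lock, which
 * is why writeback_single_inode() reacquires them in that order after
 * __writeback_single_inode() returns.
 */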

static long writeback_chunk_size(struct backing_dev_info *bdi,
                                 struct wb_writeback_work *work)
{
        long pages;

        /*
         * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
         * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
         * here avoids calling into writeback_inodes_wb() more than once.
         *
         * The intended call sequence for WB_SYNC_ALL writeback is:
         *
         *      wb_writeback()
         *          writeback_sb_inodes()       <== called only once
         *              write_cache_pages()     <== called once for each inode
         *                   (quickly) tag currently dirty pages
         *                   (maybe slowly) sync all tagged pages
         */
        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
                pages = LONG_MAX;
        else {
                pages = min(bdi->avg_write_bandwidth / 2,
                            global_dirty_limit / DIRTY_SCOPE);
                pages = min(pages, work->nr_pages);
                pages = round_down(pages + MIN_WRITEBACK_PAGES,
                                   MIN_WRITEBACK_PAGES);
        }

        return pages;
}
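/*
 * A worked example of the WB_SYNC_NONE branch, assuming 4KB pages and
 * purely illustrative numbers: with avg_write_bandwidth == 25600
 * pages/s (~100MB/s), global_dirty_limit / DIRTY_SCOPE == 20000 and
 * work->nr_pages == LONG_MAX, pages becomes min(12800, 20000) = 12800,
 * then round_down(12800 + 1024, 1024) = 13312 pages, i.e. roughly half
 * a second's worth of IO per chunk.
 */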

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
                                struct bdi_writeback *wb,
                                struct wb_writeback_work *work)
{
        struct writeback_control wbc = {
                .sync_mode              = work->sync_mode,
                .tagged_writepages      = work->tagged_writepages,
                .for_kupdate            = work->for_kupdate,
                .for_background         = work->for_background,
                .for_sync               = work->for_sync,
                .range_cyclic           = work->range_cyclic,
                .range_start            = 0,
                .range_end              = LLONG_MAX,
        };
        unsigned long start_time = jiffies;
        long write_chunk;
        long wrote = 0;  /* count both pages and inodes */

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);

                if (inode->i_sb != sb) {
                        if (work->sb) {
                                /*
                                 * We only want to write back data for this
                                 * superblock, move all inodes not belonging
                                 * to it back onto the dirty list.
                                 */
                                redirty_tail(inode, wb);
                                continue;
                        }

                        /*
                         * The inode belongs to a different superblock.
                         * Bounce back to the caller to unpin this and
                         * pin the next superblock.
                         */
                        break;
                }

                /*
                 * Don't bother with new inodes or inodes being freed; the
                 * former don't need periodic writeout yet, and for the
                 * latter writeout is handled by the freer.
                 */
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        redirty_tail(inode, wb);
                        continue;
                }
                if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
                        /*
                         * If this inode is locked for writeback and we are not
                         * doing writeback-for-data-integrity, move it to
                         * b_more_io so that writeback can proceed with the
                         * other inodes on s_io.
                         *
                         * We'll have another go at writing back this inode
                         * once we have completed a full scan of b_io.
                         */
                        spin_unlock(&inode->i_lock);
                        requeue_io(inode, wb);
                        trace_writeback_sb_inodes_requeue(inode);
                        continue;
                }
                spin_unlock(&wb->list_lock);

                /*
                 * We already requeued the inode if it had I_SYNC set and we
                 * are doing WB_SYNC_NONE writeback. So this catches only the
                 * WB_SYNC_ALL case.
                 */
                if (inode->i_state & I_SYNC) {
                        /* Wait for I_SYNC. This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        /* Inode may be gone, start again */
                        spin_lock(&wb->list_lock);
                        continue;
                }
                inode->i_state |= I_SYNC;
                spin_unlock(&inode->i_lock);

                write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;

                /*
                 * We use I_SYNC to pin the inode in memory. While it is set
                 * evict_inode() will wait so the inode cannot be freed.
                 */
                __writeback_single_inode(inode, &wbc);

                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
                spin_lock(&wb->list_lock);
                spin_lock(&inode->i_lock);
                if (!(inode->i_state & I_DIRTY))
                        wrote++;
                requeue_inode(inode, wb, &wbc);
                inode_sync_complete(inode);
                spin_unlock(&inode->i_lock);
                cond_resched_lock(&wb->list_lock);
                /*
                 * bail out to wb_writeback() often enough to check
                 * background threshold and other termination conditions.
                 */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        return wrote;
}
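/*
 * Note the accounting convention above: "wrote" counts pages written
 * plus one extra for each inode that became clean, so callers observe
 * progress even when an inode only had metadata to write.
 */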

static long __writeback_inodes_wb(struct bdi_writeback *wb,
                                  struct wb_writeback_work *work)
{
        unsigned long start_time = jiffies;
        long wrote = 0;

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;

                if (!grab_super_passive(sb)) {
                        /*
                         * grab_super_passive() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
                        redirty_tail(inode, wb);
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
                drop_super(sb);

                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        /* Leave any unwritten inodes on b_io */
        return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                enum wb_reason reason)
{
        struct wb_writeback_work work = {
                .nr_pages       = nr_pages,
                .sync_mode      = WB_SYNC_NONE,
                .range_cyclic   = 1,
                .reason         = reason,
                .older_than_this = jiffies,
                .older_than_this_is_set = 1,
        };

        spin_lock(&wb->list_lock);
        if (list_empty(&wb->b_io))
                queue_io(wb, &work);
        __writeback_inodes_wb(wb, &work);
        spin_unlock(&wb->list_lock);

        return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
        unsigned long background_thresh, dirty_thresh;

        global_dirty_limits(&background_thresh, &dirty_thresh);

        if (global_page_state(NR_FILE_DIRTY) +
            global_page_state(NR_UNSTABLE_NFS) > background_thresh)
                return true;

        if (bdi_stat(bdi, BDI_RECLAIMABLE) >
                                bdi_dirty_limit(bdi, background_thresh))
                return true;

        return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
                                unsigned long start_time)
{
        __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
{
        unsigned long wb_start = jiffies;
        long nr_pages = work->nr_pages;
        struct inode *inode;
        long progress;

        if (!work->older_than_this_is_set) {
                work->older_than_this = jiffies;
                work->older_than_this_is_set = 1;
        }

        spin_lock(&wb->list_lock);
        for (;;) {
                /*
                 * Stop writeback when nr_pages has been consumed
                 */
                if (work->nr_pages <= 0)
                        break;

                /*
                 * Background writeout and kupdate-style writeback may
                 * run forever. Stop them if there is other work to do
                 * so that e.g. sync can proceed. They'll be restarted
                 * after the other works are all done.
                 */
                if ((work->for_background || work->for_kupdate) &&
                    !list_empty(&wb->bdi->work_list))
                        break;

                /*
                 * For background writeout, stop when we are below the
                 * background dirty threshold
                 */
                if (work->for_background && !over_bground_thresh(wb->bdi))
                        break;

                /*
                 * Kupdate and background works are special and we want to
                 * include all inodes that need writing. Livelock avoidance is
                 * handled by these works yielding to any other work so we are
                 * safe.
                 */
                if (work->for_kupdate) {
                        work->older_than_this = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
                } else if (work->for_background)
                        work->older_than_this = jiffies;

                trace_writeback_start(wb->bdi, work);
                if (list_empty(&wb->b_io))
                        queue_io(wb, work);
                if (work->sb)
                        progress = writeback_sb_inodes(work->sb, wb, work);
                else
                        progress = __writeback_inodes_wb(wb, work);
                trace_writeback_written(wb->bdi, work);

                wb_update_bandwidth(wb, wb_start);

                /*
                 * Did we write something? Try for more
                 *
                 * Dirty inodes are moved to b_io for writeback in batches.
                 * The completion of the current batch does not necessarily
                 * mean the overall work is done. So we keep looping as long
                 * as we made some progress on cleaning pages or inodes.
                 */
                if (progress)
                        continue;
                /*
                 * No more inodes for IO, bail
                 */
                if (list_empty(&wb->b_more_io))
                        break;
                /*
                 * Nothing written. Wait for some inode to
                 * become available for writeback. Otherwise
                 * we'll just busyloop.
                 */
                if (!list_empty(&wb->b_more_io)) {
                        trace_writeback_wait(wb->bdi, work);
                        inode = wb_inode(wb->b_more_io.prev);
                        spin_lock(&inode->i_lock);
                        spin_unlock(&wb->list_lock);
                        /* This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        spin_lock(&wb->list_lock);
                }
        }
        spin_unlock(&wb->list_lock);

        return nr_pages - work->nr_pages;
}
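/*
 * For illustration of the kupdate expiry math above: with the default
 * dirty_expire_interval of 3000 centisecs, older_than_this becomes
 * jiffies - msecs_to_jiffies(30000), so only inodes dirtied more than
 * 30 seconds ago are picked up by queue_io().
 */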

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
        struct wb_writeback_work *work = NULL;

        spin_lock_bh(&bdi->wb_lock);
        if (!list_empty(&bdi->work_list)) {
                work = list_entry(bdi->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_bh(&bdi->wb_lock);
        return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
        return global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) +
                get_nr_dirty_inodes();
}
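/*
 * Reading the estimate above: NR_FILE_DIRTY counts dirty pagecache
 * pages, NR_UNSTABLE_NFS counts NFS pages written to the server but
 * not yet committed there, and get_nr_dirty_inodes() is added because
 * writing each dirty inode may itself dirty blockdev pagecache.
 */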

static long wb_check_background_flush(struct bdi_writeback *wb)
{
        if (over_bground_thresh(wb->bdi)) {

                struct wb_writeback_work work = {
                        .nr_pages       = LONG_MAX,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_background = 1,
                        .range_cyclic   = 1,
                        .reason         = WB_REASON_BACKGROUND,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        /*
         * When set to zero, disable periodic writeback
         */
        if (!dirty_writeback_interval)
                return 0;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = get_nr_dirty_pages();

        if (nr_pages) {
                struct wb_writeback_work work = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                        .reason         = WB_REASON_PERIODIC,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct wb_writeback_work *work;
        long wrote = 0;

        set_bit(BDI_writeback_running, &wb->bdi->state);
        while ((work = get_next_work_item(bdi)) != NULL) {

                trace_writeback_exec(bdi, work);

                wrote += wb_writeback(wb, work);

                /*
                 * Notify the caller of completion if this is a synchronous
                 * work item, otherwise just free it.
                 */
                if (work->done)
                        complete(work->done);
                else
                        kfree(work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);
        wrote += wb_check_background_flush(wb);
        clear_bit(BDI_writeback_running, &wb->bdi->state);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(to_delayed_work(work),
                                                struct bdi_writeback, dwork);
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;

        set_worker_desc("flush-%s", dev_name(bdi->dev));
        current->flags |= PF_SWAPWRITE;

        if (likely(!current_is_workqueue_rescuer() ||
                   list_empty(&bdi->bdi_list))) {
                /*
                 * The normal path.  Keep writing back @bdi until its
                 * work_list is empty.  Note that this path is also taken
                 * if @bdi is shutting down even when we're running off the
                 * rescuer as work_list needs to be drained.
                 */
                do {
                        pages_written = wb_do_writeback(wb);
                        trace_writeback_pages_written(pages_written);
                } while (!list_empty(&bdi->work_list));
        } else {
                /*
                 * bdi_wq can't get enough workers and we're running off
                 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
                 * enough for efficient IO.
                 */
                pages_written = writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                trace_writeback_pages_written(pages_written);
        }

        if (!list_empty(&bdi->work_list) ||
            (wb_has_dirty_io(wb) && dirty_writeback_interval))
                queue_delayed_work(bdi_wq, &wb->dwork,
                        msecs_to_jiffies(dirty_writeback_interval * 10));

        current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
        struct backing_dev_info *bdi;

        if (!nr_pages)
                nr_pages = get_nr_dirty_pages();

        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
                __bdi_start_writeback(bdi, nr_pages, false, reason);
        }
        rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 *      __mark_inode_dirty -    internal function
 *      @inode: inode to mark
 *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *      Mark an inode as dirty. Callers should use mark_inode_dirty or
 *      mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct backing_dev_info *bdi = NULL;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                trace_writeback_dirty_inode_start(inode, flags);

                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode, flags);

                trace_writeback_dirty_inode(inode, flags);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode->i_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out_unlock_inode;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (inode_unhashed(inode))
                                goto out_unlock_inode;
                }
                if (inode->i_state & I_FREEING)
                        goto out_unlock_inode;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        bool wakeup_bdi = false;
                        bdi = inode_to_bdi(inode);

                        spin_unlock(&inode->i_lock);
                        spin_lock(&bdi->wb.list_lock);
                        if (bdi_cap_writeback_dirty(bdi)) {
                                WARN(!test_bit(BDI_registered, &bdi->state),
                                     "bdi-%s not registered\n", bdi->name);

                                /*
                                 * If this is the first dirty inode for this
                                 * bdi, we have to wake-up the corresponding
                                 * bdi thread to make sure background
                                 * write-back happens later.
                                 */
                                if (!wb_has_dirty_io(&bdi->wb))
                                        wakeup_bdi = true;
                        }

                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
                        spin_unlock(&bdi->wb.list_lock);

                        if (wakeup_bdi)
                                bdi_wakeup_thread_delayed(bdi);
                        return;
                }
        }
out_unlock_inode:
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
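/*
 * Example (illustrative only): filesystems normally reach this through
 * the inline helpers in <linux/fs.h> rather than calling it directly:
 *
 *	mark_inode_dirty(inode);	- __mark_inode_dirty(inode, I_DIRTY)
 *	mark_inode_dirty_sync(inode);	- __mark_inode_dirty(inode, I_DIRTY_SYNC)
 */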

static void wait_sb_inodes(struct super_block *sb)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_sb_list_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call for which writeout had already started; in that case the
         * inode may not be on the dirty list, but we still have to wait
         * for that writeout.
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;

                spin_lock(&inode->i_lock);
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
                    (mapping->nrpages == 0)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&inode_sb_list_lock);

                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from s_inodes list while we dropped the
                 * inode_sb_list_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * inode_sb_list_lock. So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_sb_list_lock);
        }
        spin_unlock(&inode_sb_list_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
                            unsigned long nr,
                            enum wb_reason reason)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb                     = sb,
                .sync_mode              = WB_SYNC_NONE,
                .tagged_writepages      = 1,
                .done                   = &done,
                .nr_pages               = nr,
                .reason                 = reason,
        };

        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
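/*
 * Example (illustrative only): a filesystem running low on free space
 * might kick writeback of its own superblock with
 *
 *	writeback_inodes_sb_nr(sb, 1024, WB_REASON_FS_FREE_SPACE);
 *
 * which waits for the flusher to process the work item but not for
 * completion of the submitted IO.  The caller must hold sb->s_umount,
 * as the WARN_ON above enforces.
 */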

/**
 * writeback_inodes_sb  -       writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
        return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
                                  unsigned long nr,
                                  enum wb_reason reason)
{
        if (writeback_in_progress(sb->s_bdi))
                return 1;

        if (!down_read_trylock(&sb->s_umount))
                return 0;

        writeback_inodes_sb_nr(sb, nr, reason);
        up_read(&sb->s_umount);
        return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented via try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
        return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb       -       sync sb inode pages
 * @sb:                 the superblock
 * @older_than_this:    timestamp
 *
 * This function writes and waits on any dirty inode belonging to this
 * superblock that has been dirtied before given timestamp.
 */
void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_ALL,
                .nr_pages       = LONG_MAX,
                .older_than_this = older_than_this,
                .older_than_this_is_set = 1,
                .range_cyclic   = 0,
                .done           = &done,
                .reason         = WB_REASON_SYNC,
                .for_sync       = 1,
        };

        /* Nothing to do? */
        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);

        wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now      -       write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
        struct writeback_control wbc = {
                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .nr_to_write = 0, /* metadata-only */
        };

        return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
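/*
 * Example (illustrative only): a filesystem that has updated inode
 * timestamps and wants them on disk before returning could use
 *
 *	int err = sync_inode_metadata(inode, 1);
 *
 * which writes the inode synchronously (WB_SYNC_ALL) while leaving its
 * data pages alone (nr_to_write == 0).
 */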