/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
        long nr_pages;
        struct super_block *sb;
        unsigned long *older_than_this;
        enum writeback_sync_modes sync_mode;
        unsigned int tagged_writepages:1;
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;
        enum wb_reason reason;          /* why was writeback initiated? */

        struct list_head list;          /* pending work list */
        struct completion *done;        /* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

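/*
 * Return the backing_dev_info that @inode writes back to: the bdev mapping's
 * bdi for block device inodes, the superblock's bdi for everything else.
 */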
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (strcmp(sb->s_type->name, "bdev") == 0)
                return inode->i_mapping->backing_dev_info;

        return sb->s_bdi;
}

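/* Return the inode that embeds this writeback list entry. */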
static inline struct inode *wb_inode(struct list_head *head)
{
        return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

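/* Kick the flusher work for @bdi to run now, if the bdi is still registered. */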
static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi->wb_lock);
        if (test_bit(BDI_registered, &bdi->state))
                mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
        spin_unlock_bh(&bdi->wb_lock);
}

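/*
 * Queue @work on @bdi and wake its flusher. If the bdi has been unregistered,
 * complete the work (when a completion is attached) and drop it.
 */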
static void bdi_queue_work(struct backing_dev_info *bdi,
                           struct wb_writeback_work *work)
{
        trace_writeback_queue(bdi, work);

        spin_lock_bh(&bdi->wb_lock);
        if (!test_bit(BDI_registered, &bdi->state)) {
                if (work->done)
                        complete(work->done);
                goto out_unlock;
        }
        list_add_tail(&work->list, &bdi->work_list);
        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
        spin_unlock_bh(&bdi->wb_lock);
}

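/*
 * Allocate and queue a WB_SYNC_NONE work item asking for @nr_pages pages to
 * be written back.
 */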
static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                      bool range_cyclic, enum wb_reason reason)
{
        struct wb_writeback_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wakeup the thread for old dirty data writeback
         */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                trace_writeback_nowork(bdi);
                bdi_wakeup_thread(bdi);
                return;
        }

        work->sync_mode = WB_SYNC_NONE;
        work->nr_pages  = nr_pages;
        work->range_cyclic = range_cyclic;
        work->reason    = reason;

        bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                        enum wb_reason reason)
{
        __bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
        /*
         * We just wake up the flusher thread. It will perform background
         * writeback as soon as there is no other work to do.
         */
        trace_writeback_wake_background(bdi);
        bdi_wakeup_thread(bdi);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        spin_lock(&bdi->wb.list_lock);
        list_del_init(&inode->i_wb_list);
        spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = wb_inode(wb->b_dirty.next);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        inode->i_state &= ~I_SYNC;
        /* If inode is clean and unused, put it into LRU now... */
        inode_add_lru(inode);
        /* Waiters must see I_SYNC cleared before being woken up */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole bdi writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
                               struct list_head *dispatch_queue,
                               struct wb_writeback_work *work)
{
        LIST_HEAD(tmp);
        struct list_head *pos, *node;
        struct super_block *sb = NULL;
        struct inode *inode;
        int do_sb_sort = 0;
        int moved = 0;

        while (!list_empty(delaying_queue)) {
                inode = wb_inode(delaying_queue->prev);
                if (work->older_than_this &&
                    inode_dirtied_after(inode, *work->older_than_this))
                        break;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
                list_move(&inode->i_wb_list, &tmp);
                moved++;
        }

        /* just one sb in list, splice to dispatch_queue and we're done */
        if (!do_sb_sort) {
                list_splice(&tmp, dispatch_queue);
                goto out;
        }

        /* Move inodes from one superblock together */
        while (!list_empty(&tmp)) {
                sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
                        inode = wb_inode(pos);
                        if (inode->i_sb == sb)
                                list_move(&inode->i_wb_list, dispatch_queue);
                }
        }
out:
        return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
        int moved;
        assert_spin_locked(&wb->list_lock);
        list_splice_init(&wb->b_more_io, &wb->b_io);
        moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
        trace_writeback_queue_io(wb, work, moved);
}

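/* Write out the inode itself via the filesystem's ->write_inode, if any. */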
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
                trace_writeback_write_inode_start(inode, wbc);
                ret = inode->i_sb->s_op->write_inode(inode, wbc);
                trace_writeback_write_inode(inode, wbc);
                return ret;
        }
        return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
        __releases(inode->i_lock)
        __acquires(inode->i_lock)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode->i_lock);
        }
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        __inode_wait_for_writeback(inode);
        spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
        __releases(inode->i_lock)
{
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        int sleep;

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        sleep = inode->i_state & I_SYNC;
        spin_unlock(&inode->i_lock);
        if (sleep)
                schedule();
        finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                          struct writeback_control *wbc)
{
        if (inode->i_state & I_FREEING)
                return;

        /*
         * Sync livelock prevention. Each inode is tagged and synced in one
         * shot. If still dirty, it will be redirty_tail()'ed below.  Update
         * the dirty time to prevent enqueue and sync it again.
         */
        if ((inode->i_state & I_DIRTY) &&
            (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
                inode->dirtied_when = jiffies;

        if (wbc->pages_skipped) {
                /*
                 * writeback is not making progress due to locked
                 * buffers. Skip this inode for now.
                 */
                redirty_tail(inode, wb);
                return;
        }

        if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                /*
                 * We didn't write back all the pages.  nfs_writepages()
                 * sometimes bails out without doing anything.
                 */
                if (wbc->nr_to_write <= 0) {
                        /* Slice used up. Queue for next turn. */
                        requeue_io(inode, wb);
                } else {
                        /*
                         * Writeback blocked by something other than
                         * congestion. Delay the inode for some time to
                         * avoid spinning on the CPU (100% iowait)
                         * retrying writeback of the dirty page/inode
                         * that cannot be performed immediately.
                         */
                        redirty_tail(inode, wb);
                }
        } else if (inode->i_state & I_DIRTY) {
                /*
                 * Filesystems can dirty the inode during writeback operations,
                 * such as delayed allocation during submission or metadata
                 * updates after data IO completion.
                 */
                redirty_tail(inode, wb);
        } else {
                /* The inode is clean. Remove from writeback lists. */
                list_del_init(&inode->i_wb_list);
        }
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        long nr_to_write = wbc->nr_to_write;
        unsigned dirty;
        int ret;

        WARN_ON(!(inode->i_state & I_SYNC));

        trace_writeback_single_inode_start(inode, wbc, nr_to_write);

        ret = do_writepages(mapping, wbc);

        /*
         * Make sure to wait on the data before writing out the metadata.
         * This is important for filesystems that modify metadata on data
         * I/O completion.
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        /*
         * Some filesystems may redirty the inode during the writeback
         * due to delalloc, clear dirty metadata flags right before
         * write_inode()
         */
        spin_lock(&inode->i_lock);
        /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                inode->i_state &= ~I_DIRTY_PAGES;
        dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        spin_unlock(&inode->i_lock);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
                if (ret == 0)
                        ret = err;
        }
        trace_writeback_single_inode(inode, wbc, nr_to_write);
        return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode which
 * we get e.g. from the filesystem. The flusher thread uses __writeback_single_inode()
 * and does more profound writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                       struct writeback_control *wbc)
{
        int ret = 0;

        spin_lock(&inode->i_lock);
        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out;
                /*
                 * It's a data-integrity sync. We must wait. Since callers hold
                 * inode reference or inode has I_WILL_FREE set, it cannot go
                 * away under us.
                 */
                __inode_wait_for_writeback(inode);
        }
        WARN_ON(inode->i_state & I_SYNC);
        /*
         * Skip inode if it is clean and we have no outstanding writeback in
         * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
         * function since flusher thread may be doing for example sync in
         * parallel and if we move the inode, it could get skipped. So here we
         * make sure inode is on some writeback list and leave it there unless
         * we have completely cleaned the inode.
         */
        if (!(inode->i_state & I_DIRTY) &&
            (wbc->sync_mode != WB_SYNC_ALL ||
             !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
                goto out;
        inode->i_state |= I_SYNC;
        spin_unlock(&inode->i_lock);

        ret = __writeback_single_inode(inode, wbc);

        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        /*
         * If inode is clean, remove it from writeback lists. Otherwise don't
         * touch it. See comment above for explanation.
         */
        if (!(inode->i_state & I_DIRTY))
                list_del_init(&inode->i_wb_list);
        spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
out:
        spin_unlock(&inode->i_lock);
        return ret;
}

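/* Decide how many pages one inode may write in a single slice of this work. */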
static long writeback_chunk_size(struct backing_dev_info *bdi,
                                 struct wb_writeback_work *work)
{
        long pages;

        /*
         * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
         * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
         * here avoids calling into writeback_inodes_wb() more than once.
         *
         * The intended call sequence for WB_SYNC_ALL writeback is:
         *
         *      wb_writeback()
         *          writeback_sb_inodes()       <== called only once
         *              write_cache_pages()     <== called once for each inode
         *                   (quickly) tag currently dirty pages
         *                   (maybe slowly) sync all tagged pages
         */
        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
                pages = LONG_MAX;
        else {
                pages = min(bdi->avg_write_bandwidth / 2,
                            global_dirty_limit / DIRTY_SCOPE);
                pages = min(pages, work->nr_pages);
                pages = round_down(pages + MIN_WRITEBACK_PAGES,
                                   MIN_WRITEBACK_PAGES);
        }

        return pages;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
                                struct bdi_writeback *wb,
                                struct wb_writeback_work *work)
{
        struct writeback_control wbc = {
                .sync_mode              = work->sync_mode,
                .tagged_writepages      = work->tagged_writepages,
                .for_kupdate            = work->for_kupdate,
                .for_background         = work->for_background,
                .range_cyclic           = work->range_cyclic,
                .range_start            = 0,
                .range_end              = LLONG_MAX,
        };
        unsigned long start_time = jiffies;
        long write_chunk;
        long wrote = 0;  /* count both pages and inodes */

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);

                if (inode->i_sb != sb) {
                        if (work->sb) {
                                /*
                                 * We only want to write back data for this
                                 * superblock, move all inodes not belonging
                                 * to it back onto the dirty list.
                                 */
                                redirty_tail(inode, wb);
                                continue;
                        }

                        /*
                         * The inode belongs to a different superblock.
                         * Bounce back to the caller to unpin this and
                         * pin the next superblock.
                         */
                        break;
                }

                /*
                 * Don't bother with new inodes or inodes being freed, first
                 * kind does not need periodic writeout yet, and for the latter
                 * kind writeout is handled by the freer.
                 */
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        redirty_tail(inode, wb);
                        continue;
                }
                if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
                        /*
                         * If this inode is locked for writeback and we are not
                         * doing writeback-for-data-integrity, move it to
                         * b_more_io so that writeback can proceed with the
                         * other inodes on s_io.
                         *
                         * We'll have another go at writing back this inode
                         * when we completed a full scan of b_io.
                         */
                        spin_unlock(&inode->i_lock);
                        requeue_io(inode, wb);
                        trace_writeback_sb_inodes_requeue(inode);
                        continue;
                }
                spin_unlock(&wb->list_lock);

                /*
                 * We already requeued the inode if it had I_SYNC set and we
                 * are doing WB_SYNC_NONE writeback. So this catches only the
                 * WB_SYNC_ALL case.
                 */
                if (inode->i_state & I_SYNC) {
                        /* Wait for I_SYNC. This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        /* Inode may be gone, start again */
                        spin_lock(&wb->list_lock);
                        continue;
                }
                inode->i_state |= I_SYNC;
                spin_unlock(&inode->i_lock);

                write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;

                /*
                 * We use I_SYNC to pin the inode in memory. While it is set
                 * evict_inode() will wait so the inode cannot be freed.
                 */
                __writeback_single_inode(inode, &wbc);

                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
                spin_lock(&wb->list_lock);
                spin_lock(&inode->i_lock);
                if (!(inode->i_state & I_DIRTY))
                        wrote++;
                requeue_inode(inode, wb, &wbc);
                inode_sync_complete(inode);
                spin_unlock(&inode->i_lock);
                cond_resched_lock(&wb->list_lock);
                /*
                 * bail out to wb_writeback() often enough to check
                 * background threshold and other termination conditions.
                 */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
                                  struct wb_writeback_work *work)
{
        unsigned long start_time = jiffies;
        long wrote = 0;

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;

                if (!grab_super_passive(sb)) {
                        /*
                         * grab_super_passive() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
                        redirty_tail(inode, wb);
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
                drop_super(sb);

                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        /* Leave any unwritten inodes on b_io */
        return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                enum wb_reason reason)
{
        struct wb_writeback_work work = {
                .nr_pages       = nr_pages,
                .sync_mode      = WB_SYNC_NONE,
                .range_cyclic   = 1,
                .reason         = reason,
        };

        spin_lock(&wb->list_lock);
        if (list_empty(&wb->b_io))
                queue_io(wb, &work);
        __writeback_inodes_wb(wb, &work);
        spin_unlock(&wb->list_lock);

        return nr_pages - work.nr_pages;
}

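/*
 * Return true if global or per-bdi dirty memory is over the background
 * writeback threshold.
 */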
static bool over_bground_thresh(struct backing_dev_info *bdi)
{
        unsigned long background_thresh, dirty_thresh;

        global_dirty_limits(&background_thresh, &dirty_thresh);

        if (global_page_state(NR_FILE_DIRTY) +
            global_page_state(NR_UNSTABLE_NFS) > background_thresh)
                return true;

        if (bdi_stat(bdi, BDI_RECLAIMABLE) >
                                bdi_dirty_limit(bdi, background_thresh))
                return true;

        return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
                                unsigned long start_time)
{
        __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
{
        unsigned long wb_start = jiffies;
        long nr_pages = work->nr_pages;
        unsigned long oldest_jif;
        struct inode *inode;
        long progress;

        oldest_jif = jiffies;
        work->older_than_this = &oldest_jif;

        spin_lock(&wb->list_lock);
        for (;;) {
                /*
                 * Stop writeback when nr_pages has been consumed
                 */
                if (work->nr_pages <= 0)
                        break;

                /*
                 * Background writeout and kupdate-style writeback may
                 * run forever. Stop them if there is other work to do
                 * so that e.g. sync can proceed. They'll be restarted
                 * after the other works are all done.
                 */
                if ((work->for_background || work->for_kupdate) &&
                    !list_empty(&wb->bdi->work_list))
                        break;

                /*
                 * For background writeout, stop when we are below the
                 * background dirty threshold
                 */
                if (work->for_background && !over_bground_thresh(wb->bdi))
                        break;

                /*
                 * Kupdate and background works are special and we want to
                 * include all inodes that need writing. Livelock avoidance is
                 * handled by these works yielding to any other work so we are
                 * safe.
                 */
                if (work->for_kupdate) {
                        oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
                } else if (work->for_background)
                        oldest_jif = jiffies;

                trace_writeback_start(wb->bdi, work);
                if (list_empty(&wb->b_io))
                        queue_io(wb, work);
                if (work->sb)
                        progress = writeback_sb_inodes(work->sb, wb, work);
                else
                        progress = __writeback_inodes_wb(wb, work);
                trace_writeback_written(wb->bdi, work);

                wb_update_bandwidth(wb, wb_start);

                /*
                 * Did we write something? Try for more
                 *
                 * Dirty inodes are moved to b_io for writeback in batches.
                 * The completion of the current batch does not necessarily
                 * mean the overall work is done. So we keep looping as long
                 * as we made some progress on cleaning pages or inodes.
                 */
                if (progress)
                        continue;
                /*
                 * No more inodes for IO, bail
                 */
                if (list_empty(&wb->b_more_io))
                        break;
                /*
                 * Nothing written. Wait for some inode to
                 * become available for writeback. Otherwise
                 * we'll just busyloop.
                 */
                if (!list_empty(&wb->b_more_io))  {
                        trace_writeback_wait(wb->bdi, work);
                        inode = wb_inode(wb->b_more_io.prev);
                        spin_lock(&inode->i_lock);
                        spin_unlock(&wb->list_lock);
                        /* This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        spin_lock(&wb->list_lock);
                }
        }
        spin_unlock(&wb->list_lock);

        return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
        struct wb_writeback_work *work = NULL;

        spin_lock_bh(&bdi->wb_lock);
        if (!list_empty(&bdi->work_list)) {
                work = list_entry(bdi->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_bh(&bdi->wb_lock);
        return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
        return global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) +
                get_nr_dirty_inodes();
}

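/* Flush as long as we are over the background dirty threshold. */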
static long wb_check_background_flush(struct bdi_writeback *wb)
{
        if (over_bground_thresh(wb->bdi)) {

                struct wb_writeback_work work = {
                        .nr_pages       = LONG_MAX,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_background = 1,
                        .range_cyclic   = 1,
                        .reason         = WB_REASON_BACKGROUND,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

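/*
 * Periodic kupdate-style writeback: flush data that has been dirty longer
 * than dirty_expire_interval, at most once per dirty_writeback_interval.
 */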
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        /*
         * When set to zero, disable periodic writeback
         */
        if (!dirty_writeback_interval)
                return 0;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = get_nr_dirty_pages();

        if (nr_pages) {
                struct wb_writeback_work work = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                        .reason         = WB_REASON_PERIODIC,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct wb_writeback_work *work;
        long wrote = 0;

        set_bit(BDI_writeback_running, &wb->bdi->state);
        while ((work = get_next_work_item(bdi)) != NULL) {
                /*
                 * Override sync mode, in case we must wait for completion
                 * because this thread is exiting now.
                 */
                if (force_wait)
                        work->sync_mode = WB_SYNC_ALL;

                trace_writeback_exec(bdi, work);

                wrote += wb_writeback(wb, work);

                /*
                 * Notify the caller of completion if this is a synchronous
                 * work item, otherwise just free it.
                 */
                if (work->done)
                        complete(work->done);
                else
                        kfree(work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);
        wrote += wb_check_background_flush(wb);
        clear_bit(BDI_writeback_running, &wb->bdi->state);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(to_delayed_work(work),
                                                struct bdi_writeback, dwork);
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;

        set_worker_desc("flush-%s", dev_name(bdi->dev));
        current->flags |= PF_SWAPWRITE;

        if (likely(!current_is_workqueue_rescuer() ||
                   !test_bit(BDI_registered, &bdi->state))) {
                /*
                 * The normal path.  Keep writing back @bdi until its
                 * work_list is empty.  Note that this path is also taken
                 * if @bdi is shutting down even when we're running off the
                 * rescuer as work_list needs to be drained.
                 */
                do {
                        pages_written = wb_do_writeback(wb, 0);
                        trace_writeback_pages_written(pages_written);
                } while (!list_empty(&bdi->work_list));
        } else {
                /*
                 * bdi_wq can't get enough workers and we're running off
                 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
                 * enough for efficient IO.
                 */
                pages_written = writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                trace_writeback_pages_written(pages_written);
        }

        if (!list_empty(&bdi->work_list))
                mod_delayed_work(bdi_wq, &wb->dwork, 0);
        else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                bdi_wakeup_thread_delayed(bdi);

        current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
        struct backing_dev_info *bdi;

        if (!nr_pages) {
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
                __bdi_start_writeback(bdi, nr_pages, false, reason);
        }
        rcu_read_unlock();
}

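/* Log which task dirtied which inode, for block_dump debugging. */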
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 *      __mark_inode_dirty -    internal function
 *      @inode: inode to mark
 *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *      Mark an inode as dirty. Callers should use mark_inode_dirty or
 *      mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct backing_dev_info *bdi = NULL;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                trace_writeback_dirty_inode_start(inode, flags);

                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode, flags);

                trace_writeback_dirty_inode(inode, flags);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump > 1))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode->i_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out_unlock_inode;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (inode_unhashed(inode))
                                goto out_unlock_inode;
                }
                if (inode->i_state & I_FREEING)
                        goto out_unlock_inode;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        bool wakeup_bdi = false;
                        bdi = inode_to_bdi(inode);

                        if (bdi_cap_writeback_dirty(bdi)) {
                                WARN(!test_bit(BDI_registered, &bdi->state),
                                     "bdi-%s not registered\n", bdi->name);

                                /*
                                 * If this is the first dirty inode for this
                                 * bdi, we have to wake-up the corresponding
                                 * bdi thread to make sure background
                                 * write-back happens later.
                                 */
                                if (!wb_has_dirty_io(&bdi->wb))
                                        wakeup_bdi = true;
                        }

                        spin_unlock(&inode->i_lock);
                        spin_lock(&bdi->wb.list_lock);
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
                        spin_unlock(&bdi->wb.list_lock);

                        if (wakeup_bdi)
                                bdi_wakeup_thread_delayed(bdi);
                        return;
                }
        }
out_unlock_inode:
        spin_unlock(&inode->i_lock);

}
EXPORT_SYMBOL(__mark_inode_dirty);

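/*
 * Wait for in-flight writeback on all inodes of @sb to complete. Used by
 * sync_inodes_sb() to provide data integrity.
 */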
static void wait_sb_inodes(struct super_block *sb)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_sb_list_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;

                spin_lock(&inode->i_lock);
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
                    (mapping->nrpages == 0)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&inode_sb_list_lock);

                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from s_inodes list while we dropped the
                 * inode_sb_list_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * inode_sb_list_lock. So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_sb_list_lock);
        }
        spin_unlock(&inode_sb_list_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
                            unsigned long nr,
                            enum wb_reason reason)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb                     = sb,
                .sync_mode              = WB_SYNC_NONE,
                .tagged_writepages      = 1,
                .done                   = &done,
                .nr_pages               = nr,
                .reason                 = reason,
        };

        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb  -       writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
        return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
                                  unsigned long nr,
                                  enum wb_reason reason)
{
        if (writeback_in_progress(sb->s_bdi))
                return 1;

        if (!down_read_trylock(&sb->s_umount))
                return 0;

        writeback_inodes_sb_nr(sb, nr, reason);
        up_read(&sb->s_umount);
        return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
        return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb       -       sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_ALL,
                .nr_pages       = LONG_MAX,
                .range_cyclic   = 0,
                .done           = &done,
                .reason         = WB_REASON_SYNC,
        };

        /* Nothing to do? */
        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);

        wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now      -       write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
        struct writeback_control wbc = {
                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .nr_to_write = 0, /* metadata-only */
        };

        return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);