[platform/kernel/linux-rpi.git] / mm / backing-dev.c
1
2 #include <linux/wait.h>
3 #include <linux/backing-dev.h>
4 #include <linux/kthread.h>
5 #include <linux/freezer.h>
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/writeback.h>
12 #include <linux/device.h>
13 #include <trace/events/writeback.h>
14
15 struct backing_dev_info noop_backing_dev_info = {
16         .name           = "noop",
17         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
18 };
19 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
20
21 static struct class *bdi_class;
22
23 /*
24  * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
25  * locking.
26  */
27 DEFINE_SPINLOCK(bdi_lock);
28 LIST_HEAD(bdi_list);
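/*
 * Illustrative sketch only (not part of this file's logic): per the locking
 * rule above, writers modify bdi_list while holding bdi_lock, whereas readers
 * may walk the list with just rcu_read_lock() held, e.g.
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		inspect(bdi);		(inspect() is a stand-in for real work)
 *	rcu_read_unlock();
 *
 * bdi_register_va() and bdi_remove_from_list() later in this file show the
 * writer side of this rule.
 */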
29
30 /* bdi_wq serves all asynchronous writeback tasks */
31 struct workqueue_struct *bdi_wq;
32
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36
37 static struct dentry *bdi_debug_root;
38
39 static void bdi_debug_init(void)
40 {
41         bdi_debug_root = debugfs_create_dir("bdi", NULL);
42 }
43
44 static int bdi_debug_stats_show(struct seq_file *m, void *v)
45 {
46         struct backing_dev_info *bdi = m->private;
47         struct bdi_writeback *wb = &bdi->wb;
48         unsigned long background_thresh;
49         unsigned long dirty_thresh;
50         unsigned long wb_thresh;
51         unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
52         struct inode *inode;
53
54         nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
55         spin_lock(&wb->list_lock);
56         list_for_each_entry(inode, &wb->b_dirty, i_io_list)
57                 nr_dirty++;
58         list_for_each_entry(inode, &wb->b_io, i_io_list)
59                 nr_io++;
60         list_for_each_entry(inode, &wb->b_more_io, i_io_list)
61                 nr_more_io++;
62         list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
63                 if (inode->i_state & I_DIRTY_TIME)
64                         nr_dirty_time++;
65         spin_unlock(&wb->list_lock);
66
67         global_dirty_limits(&background_thresh, &dirty_thresh);
68         wb_thresh = wb_calc_thresh(wb, dirty_thresh);
69
70 #define K(x) ((x) << (PAGE_SHIFT - 10))
71         seq_printf(m,
72                    "BdiWriteback:       %10lu kB\n"
73                    "BdiReclaimable:     %10lu kB\n"
74                    "BdiDirtyThresh:     %10lu kB\n"
75                    "DirtyThresh:        %10lu kB\n"
76                    "BackgroundThresh:   %10lu kB\n"
77                    "BdiDirtied:         %10lu kB\n"
78                    "BdiWritten:         %10lu kB\n"
79                    "BdiWriteBandwidth:  %10lu kBps\n"
80                    "b_dirty:            %10lu\n"
81                    "b_io:               %10lu\n"
82                    "b_more_io:          %10lu\n"
83                    "b_dirty_time:       %10lu\n"
84                    "bdi_list:           %10u\n"
85                    "state:              %10lx\n",
86                    (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
87                    (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
88                    K(wb_thresh),
89                    K(dirty_thresh),
90                    K(background_thresh),
91                    (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
92                    (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
93                    (unsigned long) K(wb->write_bandwidth),
94                    nr_dirty,
95                    nr_io,
96                    nr_more_io,
97                    nr_dirty_time,
98                    !list_empty(&bdi->bdi_list), bdi->wb.state);
99 #undef K
100
101         return 0;
102 }
103 DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
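/*
 * With debugfs mounted in the usual place, the file created by
 * bdi_debug_register() below ends up at /sys/kernel/debug/bdi/<dev>/stats
 * and prints the fields formatted above, e.g. (illustrative values only):
 *
 *	BdiWriteback:                0 kB
 *	BdiReclaimable:            512 kB
 *	DirtyThresh:            409600 kB
 *	b_dirty:                     3
 *	state:                       5
 *
 * All byte counts are reported in kB via the K() page-to-kilobyte macro.
 */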
104
105 static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
106 {
107         if (!bdi_debug_root)
108                 return -ENOMEM;
109
110         bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
111         if (!bdi->debug_dir)
112                 return -ENOMEM;
113
114         bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
115                                                bdi, &bdi_debug_stats_fops);
116         if (!bdi->debug_stats) {
117                 debugfs_remove(bdi->debug_dir);
118                 bdi->debug_dir = NULL;
119                 return -ENOMEM;
120         }
121
122         return 0;
123 }
124
125 static void bdi_debug_unregister(struct backing_dev_info *bdi)
126 {
127         debugfs_remove(bdi->debug_stats);
128         debugfs_remove(bdi->debug_dir);
129 }
130 #else
131 static inline void bdi_debug_init(void)
132 {
133 }
134 static inline int bdi_debug_register(struct backing_dev_info *bdi,
135                                       const char *name)
136 {
137         return 0;
138 }
139 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
140 {
141 }
142 #endif
143
144 static ssize_t read_ahead_kb_store(struct device *dev,
145                                   struct device_attribute *attr,
146                                   const char *buf, size_t count)
147 {
148         struct backing_dev_info *bdi = dev_get_drvdata(dev);
149         unsigned long read_ahead_kb;
150         ssize_t ret;
151
152         ret = kstrtoul(buf, 10, &read_ahead_kb);
153         if (ret < 0)
154                 return ret;
155
156         bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
157
158         return count;
159 }
160
161 #define K(pages) ((pages) << (PAGE_SHIFT - 10))
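/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) K(pages) is pages << 2,
 * i.e. one page accounts for 4 kB; read_ahead_kb_store() above performs the
 * inverse conversion with >> (PAGE_SHIFT - 10).
 */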
162
163 #define BDI_SHOW(name, expr)                                            \
164 static ssize_t name##_show(struct device *dev,                          \
165                            struct device_attribute *attr, char *page)   \
166 {                                                                       \
167         struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
168                                                                         \
169         return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
170 }                                                                       \
171 static DEVICE_ATTR_RW(name);
172
173 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
174
175 static ssize_t min_ratio_store(struct device *dev,
176                 struct device_attribute *attr, const char *buf, size_t count)
177 {
178         struct backing_dev_info *bdi = dev_get_drvdata(dev);
179         unsigned int ratio;
180         ssize_t ret;
181
182         ret = kstrtouint(buf, 10, &ratio);
183         if (ret < 0)
184                 return ret;
185
186         ret = bdi_set_min_ratio(bdi, ratio);
187         if (!ret)
188                 ret = count;
189
190         return ret;
191 }
192 BDI_SHOW(min_ratio, bdi->min_ratio)
193
194 static ssize_t max_ratio_store(struct device *dev,
195                 struct device_attribute *attr, const char *buf, size_t count)
196 {
197         struct backing_dev_info *bdi = dev_get_drvdata(dev);
198         unsigned int ratio;
199         ssize_t ret;
200
201         ret = kstrtouint(buf, 10, &ratio);
202         if (ret < 0)
203                 return ret;
204
205         ret = bdi_set_max_ratio(bdi, ratio);
206         if (!ret)
207                 ret = count;
208
209         return ret;
210 }
211 BDI_SHOW(max_ratio, bdi->max_ratio)
212
213 static ssize_t stable_pages_required_show(struct device *dev,
214                                           struct device_attribute *attr,
215                                           char *page)
216 {
217         struct backing_dev_info *bdi = dev_get_drvdata(dev);
218
219         return snprintf(page, PAGE_SIZE-1, "%d\n",
220                         bdi_cap_stable_pages_required(bdi) ? 1 : 0);
221 }
222 static DEVICE_ATTR_RO(stable_pages_required);
223
224 static struct attribute *bdi_dev_attrs[] = {
225         &dev_attr_read_ahead_kb.attr,
226         &dev_attr_min_ratio.attr,
227         &dev_attr_max_ratio.attr,
228         &dev_attr_stable_pages_required.attr,
229         NULL,
230 };
231 ATTRIBUTE_GROUPS(bdi_dev);
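/*
 * These attributes are exported through the "bdi" class set up in
 * bdi_class_init() below, typically visible as /sys/class/bdi/<name>/ where
 * <name> is the string passed to bdi_register() (e.g. "8:0" for a block
 * device).  A usage sketch from userspace, assuming such a device exists:
 *
 *	# echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *	# echo 10 > /sys/class/bdi/8:0/max_ratio
 *	# cat /sys/class/bdi/8:0/stable_pages_required
 *
 * Malformed input is rejected by the kstrtoul()/kstrtouint() calls in the
 * _store helpers above, and bdi_set_min_ratio()/bdi_set_max_ratio() validate
 * the requested percentages.
 */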
232
233 static __init int bdi_class_init(void)
234 {
235         bdi_class = class_create(THIS_MODULE, "bdi");
236         if (IS_ERR(bdi_class))
237                 return PTR_ERR(bdi_class);
238
239         bdi_class->dev_groups = bdi_dev_groups;
240         bdi_debug_init();
241
242         return 0;
243 }
244 postcore_initcall(bdi_class_init);
245
246 static int bdi_init(struct backing_dev_info *bdi);
247
248 static int __init default_bdi_init(void)
249 {
250         int err;
251
252         bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
253                                               WQ_UNBOUND | WQ_SYSFS, 0);
254         if (!bdi_wq)
255                 return -ENOMEM;
256
257         err = bdi_init(&noop_backing_dev_info);
258
259         return err;
260 }
261 subsys_initcall(default_bdi_init);
262
263 /*
264  * This function is used when the first inode for this wb is marked dirty. It
265  * wakes up the corresponding bdi thread which should then take care of the
266  * periodic background write-out of dirty inodes. Since the write-out would
267  * start only 'dirty_writeback_interval' centisecs from now anyway, we just
268  * set up a timer which wakes the bdi thread up later.
269  *
270  * Note, we could simply wake the bdi thread up instead, but this function is
271  * on the fast-path (used by '__mark_inode_dirty()'), so we save a few context
272  * switches by delaying the wake-up.
273  *
274  * We have to be careful not to postpone flush work if it is scheduled for
275  * earlier. Thus we use queue_delayed_work().
276  */
277 void wb_wakeup_delayed(struct bdi_writeback *wb)
278 {
279         unsigned long timeout;
280
281         timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
282         spin_lock_bh(&wb->work_lock);
283         if (test_bit(WB_registered, &wb->state))
284                 queue_delayed_work(bdi_wq, &wb->dwork, timeout);
285         spin_unlock_bh(&wb->work_lock);
286 }
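/*
 * Worked example for the timeout above: dirty_writeback_interval is expressed
 * in centiseconds (default 500, i.e. 5 seconds), so multiplying by 10 yields
 * milliseconds for msecs_to_jiffies().  With the default setting the flusher
 * work is therefore queued roughly five seconds after the first inode is
 * dirtied.
 */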
287
288 /*
289  * Initial write bandwidth: 100 MB/s
290  */
291 #define INIT_BW         (100 << (20 - PAGE_SHIFT))
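/*
 * INIT_BW is in pages per second: 100 << (20 - PAGE_SHIFT) is 100 MiB/s worth
 * of pages, e.g. 100 << 8 == 25600 pages/s with 4 KiB pages (PAGE_SHIFT == 12).
 */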
292
293 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
294                    int blkcg_id, gfp_t gfp)
295 {
296         int i, err;
297
298         memset(wb, 0, sizeof(*wb));
299
300         if (wb != &bdi->wb)
301                 bdi_get(bdi);
302         wb->bdi = bdi;
303         wb->last_old_flush = jiffies;
304         INIT_LIST_HEAD(&wb->b_dirty);
305         INIT_LIST_HEAD(&wb->b_io);
306         INIT_LIST_HEAD(&wb->b_more_io);
307         INIT_LIST_HEAD(&wb->b_dirty_time);
308         spin_lock_init(&wb->list_lock);
309
310         wb->bw_time_stamp = jiffies;
311         wb->balanced_dirty_ratelimit = INIT_BW;
312         wb->dirty_ratelimit = INIT_BW;
313         wb->write_bandwidth = INIT_BW;
314         wb->avg_write_bandwidth = INIT_BW;
315
316         spin_lock_init(&wb->work_lock);
317         INIT_LIST_HEAD(&wb->work_list);
318         INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
319         wb->dirty_sleep = jiffies;
320
321         wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
322         if (!wb->congested) {
323                 err = -ENOMEM;
324                 goto out_put_bdi;
325         }
326
327         err = fprop_local_init_percpu(&wb->completions, gfp);
328         if (err)
329                 goto out_put_cong;
330
331         for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
332                 err = percpu_counter_init(&wb->stat[i], 0, gfp);
333                 if (err)
334                         goto out_destroy_stat;
335         }
336
337         return 0;
338
339 out_destroy_stat:
340         while (i--)
341                 percpu_counter_destroy(&wb->stat[i]);
342         fprop_local_destroy_percpu(&wb->completions);
343 out_put_cong:
344         wb_congested_put(wb->congested);
345 out_put_bdi:
346         if (wb != &bdi->wb)
347                 bdi_put(bdi);
348         return err;
349 }
350
351 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
352
353 /*
354  * Remove bdi from the global list and shutdown any threads we have running
355  */
356 static void wb_shutdown(struct bdi_writeback *wb)
357 {
358         /* Make sure nobody queues further work */
359         spin_lock_bh(&wb->work_lock);
360         if (!test_and_clear_bit(WB_registered, &wb->state)) {
361                 spin_unlock_bh(&wb->work_lock);
362                 return;
363         }
364         spin_unlock_bh(&wb->work_lock);
365
366         cgwb_remove_from_bdi_list(wb);
367         /*
368          * Drain work list and shutdown the delayed_work.  !WB_registered
369          * tells wb_workfn() that @wb is dying and its work_list needs to
370          * be drained no matter what.
371          */
372         mod_delayed_work(bdi_wq, &wb->dwork, 0);
373         flush_delayed_work(&wb->dwork);
374         WARN_ON(!list_empty(&wb->work_list));
375 }
376
377 static void wb_exit(struct bdi_writeback *wb)
378 {
379         int i;
380
381         WARN_ON(delayed_work_pending(&wb->dwork));
382
383         for (i = 0; i < NR_WB_STAT_ITEMS; i++)
384                 percpu_counter_destroy(&wb->stat[i]);
385
386         fprop_local_destroy_percpu(&wb->completions);
387         wb_congested_put(wb->congested);
388         if (wb != &wb->bdi->wb)
389                 bdi_put(wb->bdi);
390 }
391
392 #ifdef CONFIG_CGROUP_WRITEBACK
393
394 #include <linux/memcontrol.h>
395
396 /*
397  * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
398  * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
399  * protected.
400  */
401 static DEFINE_SPINLOCK(cgwb_lock);
402 static struct workqueue_struct *cgwb_release_wq;
403
404 /**
405  * wb_congested_get_create - get or create a wb_congested
406  * @bdi: associated bdi
407  * @blkcg_id: ID of the associated blkcg
408  * @gfp: allocation mask
409  *
410  * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
411  * The returned wb_congested has its reference count incremented.  Returns
412  * NULL on failure.
413  */
414 struct bdi_writeback_congested *
415 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
416 {
417         struct bdi_writeback_congested *new_congested = NULL, *congested;
418         struct rb_node **node, *parent;
419         unsigned long flags;
420 retry:
421         spin_lock_irqsave(&cgwb_lock, flags);
422
423         node = &bdi->cgwb_congested_tree.rb_node;
424         parent = NULL;
425
426         while (*node != NULL) {
427                 parent = *node;
428                 congested = rb_entry(parent, struct bdi_writeback_congested,
429                                      rb_node);
430                 if (congested->blkcg_id < blkcg_id)
431                         node = &parent->rb_left;
432                 else if (congested->blkcg_id > blkcg_id)
433                         node = &parent->rb_right;
434                 else
435                         goto found;
436         }
437
438         if (new_congested) {
439                 /* !found and storage for new one already allocated, insert */
440                 congested = new_congested;
441                 rb_link_node(&congested->rb_node, parent, node);
442                 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
443                 spin_unlock_irqrestore(&cgwb_lock, flags);
444                 return congested;
445         }
446
447         spin_unlock_irqrestore(&cgwb_lock, flags);
448
449         /* allocate storage for new one and retry */
450         new_congested = kzalloc(sizeof(*new_congested), gfp);
451         if (!new_congested)
452                 return NULL;
453
454         refcount_set(&new_congested->refcnt, 1);
455         new_congested->__bdi = bdi;
456         new_congested->blkcg_id = blkcg_id;
457         goto retry;
458
459 found:
460         refcount_inc(&congested->refcnt);
461         spin_unlock_irqrestore(&cgwb_lock, flags);
462         kfree(new_congested);
463         return congested;
464 }
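/*
 * Typical use mirrors wb_init() above: take the reference during wb setup and
 * drop it with wb_congested_put() on teardown or error, e.g.
 *
 *	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 *	if (!wb->congested)
 *		return -ENOMEM;
 *	...
 *	wb_congested_put(wb->congested);
 *
 * The unlock/allocate/retry dance in this function is the usual pattern for
 * allocating under a spinlock-protected lookup: the rbtree is re-walked after
 * kzalloc() because another CPU may have inserted the same blkcg_id while
 * cgwb_lock was dropped.
 */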
465
466 /**
467  * wb_congested_put - put a wb_congested
468  * @congested: wb_congested to put
469  *
470  * Put @congested and destroy it if the refcnt reaches zero.
471  */
472 void wb_congested_put(struct bdi_writeback_congested *congested)
473 {
474         unsigned long flags;
475
476         if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
477                 return;
478
479         /* bdi might already have been destroyed leaving @congested unlinked */
480         if (congested->__bdi) {
481                 rb_erase(&congested->rb_node,
482                          &congested->__bdi->cgwb_congested_tree);
483                 congested->__bdi = NULL;
484         }
485
486         spin_unlock_irqrestore(&cgwb_lock, flags);
487         kfree(congested);
488 }
489
490 static void cgwb_release_workfn(struct work_struct *work)
491 {
492         struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
493                                                 release_work);
494
495         mutex_lock(&wb->bdi->cgwb_release_mutex);
496         wb_shutdown(wb);
497
498         css_put(wb->memcg_css);
499         css_put(wb->blkcg_css);
500         mutex_unlock(&wb->bdi->cgwb_release_mutex);
501
502         fprop_local_destroy_percpu(&wb->memcg_completions);
503         percpu_ref_exit(&wb->refcnt);
504         wb_exit(wb);
505         kfree_rcu(wb, rcu);
506 }
507
508 static void cgwb_release(struct percpu_ref *refcnt)
509 {
510         struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
511                                                 refcnt);
512         queue_work(cgwb_release_wq, &wb->release_work);
513 }
514
515 static void cgwb_kill(struct bdi_writeback *wb)
516 {
517         lockdep_assert_held(&cgwb_lock);
518
519         WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
520         list_del(&wb->memcg_node);
521         list_del(&wb->blkcg_node);
522         percpu_ref_kill(&wb->refcnt);
523 }
524
525 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
526 {
527         spin_lock_irq(&cgwb_lock);
528         list_del_rcu(&wb->bdi_node);
529         spin_unlock_irq(&cgwb_lock);
530 }
531
532 static int cgwb_create(struct backing_dev_info *bdi,
533                        struct cgroup_subsys_state *memcg_css, gfp_t gfp)
534 {
535         struct mem_cgroup *memcg;
536         struct cgroup_subsys_state *blkcg_css;
537         struct blkcg *blkcg;
538         struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
539         struct bdi_writeback *wb;
540         unsigned long flags;
541         int ret = 0;
542
543         memcg = mem_cgroup_from_css(memcg_css);
544         blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
545         blkcg = css_to_blkcg(blkcg_css);
546         memcg_cgwb_list = &memcg->cgwb_list;
547         blkcg_cgwb_list = &blkcg->cgwb_list;
548
549         /* look up again under lock and discard on blkcg mismatch */
550         spin_lock_irqsave(&cgwb_lock, flags);
551         wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
552         if (wb && wb->blkcg_css != blkcg_css) {
553                 cgwb_kill(wb);
554                 wb = NULL;
555         }
556         spin_unlock_irqrestore(&cgwb_lock, flags);
557         if (wb)
558                 goto out_put;
559
560         /* need to create a new one */
561         wb = kmalloc(sizeof(*wb), gfp);
562         if (!wb) {
563                 ret = -ENOMEM;
564                 goto out_put;
565         }
566
567         ret = wb_init(wb, bdi, blkcg_css->id, gfp);
568         if (ret)
569                 goto err_free;
570
571         ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
572         if (ret)
573                 goto err_wb_exit;
574
575         ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
576         if (ret)
577                 goto err_ref_exit;
578
579         wb->memcg_css = memcg_css;
580         wb->blkcg_css = blkcg_css;
581         INIT_WORK(&wb->release_work, cgwb_release_workfn);
582         set_bit(WB_registered, &wb->state);
583
584         /*
585          * The root wb determines the registered state of the whole bdi and
586          * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
587          * whether they're still online.  Don't link @wb if any is dead.
588          * See wb_memcg_offline() and wb_blkcg_offline().
589          */
590         ret = -ENODEV;
591         spin_lock_irqsave(&cgwb_lock, flags);
592         if (test_bit(WB_registered, &bdi->wb.state) &&
593             blkcg_cgwb_list->next && memcg_cgwb_list->next) {
594                 /* we might have raced another instance of this function */
595                 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
596                 if (!ret) {
597                         list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
598                         list_add(&wb->memcg_node, memcg_cgwb_list);
599                         list_add(&wb->blkcg_node, blkcg_cgwb_list);
600                         css_get(memcg_css);
601                         css_get(blkcg_css);
602                 }
603         }
604         spin_unlock_irqrestore(&cgwb_lock, flags);
605         if (ret) {
606                 if (ret == -EEXIST)
607                         ret = 0;
608                 goto err_fprop_exit;
609         }
610         goto out_put;
611
612 err_fprop_exit:
613         fprop_local_destroy_percpu(&wb->memcg_completions);
614 err_ref_exit:
615         percpu_ref_exit(&wb->refcnt);
616 err_wb_exit:
617         wb_exit(wb);
618 err_free:
619         kfree(wb);
620 out_put:
621         css_put(blkcg_css);
622         return ret;
623 }
624
625 /**
626  * wb_get_create - get wb for a given memcg, create if necessary
627  * @bdi: target bdi
628  * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
629  * @gfp: allocation mask to use
630  *
631  * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
632  * create one.  The returned wb has its refcount incremented.
633  *
634  * This function uses css_get() on @memcg_css and thus expects its refcnt
635  * to be positive on invocation.  IOW, rcu_read_lock() protection on
636  * @memcg_css isn't enough.  try_get it before calling this function.
637  *
638  * A wb is keyed by its associated memcg.  As blkcg implicitly enables
639  * memcg on the default hierarchy, memcg association is guaranteed to be
640  * more specific (equal to or a descendant of the associated blkcg) and thus can
641  * identify both the memcg and blkcg associations.
642  *
643  * Because the blkcg associated with a memcg may change as blkcg is enabled
644  * and disabled closer to root in the hierarchy, each wb keeps track of
645  * both the memcg and blkcg associated with it and verifies the blkcg on
646  * each lookup.  On mismatch, the existing wb is discarded and a new one is
647  * created.
648  */
649 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
650                                     struct cgroup_subsys_state *memcg_css,
651                                     gfp_t gfp)
652 {
653         struct bdi_writeback *wb;
654
655         might_sleep_if(gfpflags_allow_blocking(gfp));
656
657         if (!memcg_css->parent)
658                 return &bdi->wb;
659
660         do {
661                 rcu_read_lock();
662                 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
663                 if (wb) {
664                         struct cgroup_subsys_state *blkcg_css;
665
666                         /* see whether the blkcg association has changed */
667                         blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
668                                                      &io_cgrp_subsys);
669                         if (unlikely(wb->blkcg_css != blkcg_css ||
670                                      !wb_tryget(wb)))
671                                 wb = NULL;
672                         css_put(blkcg_css);
673                 }
674                 rcu_read_unlock();
675         } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
676
677         return wb;
678 }
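/*
 * Caller sketch (illustrative; wb_put() is the matching release helper
 * declared in backing-dev.h).  @memcg_css must already carry a reference, as
 * required by the comment above:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_KERNEL);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */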
679
680 static int cgwb_bdi_init(struct backing_dev_info *bdi)
681 {
682         int ret;
683
684         INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
685         bdi->cgwb_congested_tree = RB_ROOT;
686         mutex_init(&bdi->cgwb_release_mutex);
687
688         ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
689         if (!ret) {
690                 bdi->wb.memcg_css = &root_mem_cgroup->css;
691                 bdi->wb.blkcg_css = blkcg_root_css;
692         }
693         return ret;
694 }
695
696 static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
697 {
698         struct radix_tree_iter iter;
699         void **slot;
700         struct bdi_writeback *wb;
701
702         WARN_ON(test_bit(WB_registered, &bdi->wb.state));
703
704         spin_lock_irq(&cgwb_lock);
705         radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
706                 cgwb_kill(*slot);
707         spin_unlock_irq(&cgwb_lock);
708
709         mutex_lock(&bdi->cgwb_release_mutex);
710         spin_lock_irq(&cgwb_lock);
711         while (!list_empty(&bdi->wb_list)) {
712                 wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
713                                       bdi_node);
714                 spin_unlock_irq(&cgwb_lock);
715                 wb_shutdown(wb);
716                 spin_lock_irq(&cgwb_lock);
717         }
718         spin_unlock_irq(&cgwb_lock);
719         mutex_unlock(&bdi->cgwb_release_mutex);
720 }
721
722 /**
723  * wb_memcg_offline - kill all wb's associated with a memcg being offlined
724  * @memcg: memcg being offlined
725  *
726  * Also prevents creation of any new wb's associated with @memcg.
727  */
728 void wb_memcg_offline(struct mem_cgroup *memcg)
729 {
730         struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
731         struct bdi_writeback *wb, *next;
732
733         spin_lock_irq(&cgwb_lock);
734         list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
735                 cgwb_kill(wb);
736         memcg_cgwb_list->next = NULL;   /* prevent new wb's */
737         spin_unlock_irq(&cgwb_lock);
738 }
739
740 /**
741  * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
742  * @blkcg: blkcg being offlined
743  *
744  * Also prevents creation of any new wb's associated with @blkcg.
745  */
746 void wb_blkcg_offline(struct blkcg *blkcg)
747 {
748         struct bdi_writeback *wb, *next;
749
750         spin_lock_irq(&cgwb_lock);
751         list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
752                 cgwb_kill(wb);
753         blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
754         spin_unlock_irq(&cgwb_lock);
755 }
756
757 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
758 {
759         struct rb_node *rbn;
760
761         spin_lock_irq(&cgwb_lock);
762         while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
763                 struct bdi_writeback_congested *congested =
764                         rb_entry(rbn, struct bdi_writeback_congested, rb_node);
765
766                 rb_erase(rbn, &bdi->cgwb_congested_tree);
767                 congested->__bdi = NULL;        /* mark @congested unlinked */
768         }
769         spin_unlock_irq(&cgwb_lock);
770 }
771
772 static void cgwb_bdi_register(struct backing_dev_info *bdi)
773 {
774         spin_lock_irq(&cgwb_lock);
775         list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
776         spin_unlock_irq(&cgwb_lock);
777 }
778
779 static int __init cgwb_init(void)
780 {
781         /*
782          * There can be many concurrent release work items overwhelming
783          * system_wq.  Put them in a separate wq and limit concurrency.
784          * There's no point in executing many of these in parallel.
785          */
786         cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
787         if (!cgwb_release_wq)
788                 return -ENOMEM;
789
790         return 0;
791 }
792 subsys_initcall(cgwb_init);
793
794 #else   /* CONFIG_CGROUP_WRITEBACK */
795
796 static int cgwb_bdi_init(struct backing_dev_info *bdi)
797 {
798         int err;
799
800         bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
801         if (!bdi->wb_congested)
802                 return -ENOMEM;
803
804         refcount_set(&bdi->wb_congested->refcnt, 1);
805
806         err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
807         if (err) {
808                 wb_congested_put(bdi->wb_congested);
809                 return err;
810         }
811         return 0;
812 }
813
814 static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
815
816 static void cgwb_bdi_exit(struct backing_dev_info *bdi)
817 {
818         wb_congested_put(bdi->wb_congested);
819 }
820
821 static void cgwb_bdi_register(struct backing_dev_info *bdi)
822 {
823         list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
824 }
825
826 static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
827 {
828         list_del_rcu(&wb->bdi_node);
829 }
830
831 #endif  /* CONFIG_CGROUP_WRITEBACK */
832
833 static int bdi_init(struct backing_dev_info *bdi)
834 {
835         int ret;
836
837         bdi->dev = NULL;
838
839         kref_init(&bdi->refcnt);
840         bdi->min_ratio = 0;
841         bdi->max_ratio = 100;
842         bdi->max_prop_frac = FPROP_FRAC_BASE;
843         INIT_LIST_HEAD(&bdi->bdi_list);
844         INIT_LIST_HEAD(&bdi->wb_list);
845         init_waitqueue_head(&bdi->wb_waitq);
846
847         ret = cgwb_bdi_init(bdi);
848
849         return ret;
850 }
851
852 struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
853 {
854         struct backing_dev_info *bdi;
855
856         bdi = kmalloc_node(sizeof(struct backing_dev_info),
857                            gfp_mask | __GFP_ZERO, node_id);
858         if (!bdi)
859                 return NULL;
860
861         if (bdi_init(bdi)) {
862                 kfree(bdi);
863                 return NULL;
864         }
865         return bdi;
866 }
867 EXPORT_SYMBOL(bdi_alloc_node);
868
869 int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
870 {
871         struct device *dev;
872
873         if (bdi->dev)   /* The driver needs to use separate queues per device */
874                 return 0;
875
876         dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
877         if (IS_ERR(dev))
878                 return PTR_ERR(dev);
879
880         cgwb_bdi_register(bdi);
881         bdi->dev = dev;
882
883         bdi_debug_register(bdi, dev_name(dev));
884         set_bit(WB_registered, &bdi->wb.state);
885
886         spin_lock_bh(&bdi_lock);
887         list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
888         spin_unlock_bh(&bdi_lock);
889
890         trace_writeback_bdi_register(bdi);
891         return 0;
892 }
893 EXPORT_SYMBOL(bdi_register_va);
894
895 int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
896 {
897         va_list args;
898         int ret;
899
900         va_start(args, fmt);
901         ret = bdi_register_va(bdi, fmt, args);
902         va_end(args);
903         return ret;
904 }
905 EXPORT_SYMBOL(bdi_register);
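/*
 * Minimal registration sketch for a driver-owned bdi (error handling trimmed;
 * devt stands for the owning device number):
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * bdi_register_owner() below wraps the same naming convention and pins the
 * owning device with get_device().
 */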
906
907 int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
908 {
909         int rc;
910
911         rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
912         if (rc)
913                 return rc;
914         /* Leaking owner reference... */
915         WARN_ON(bdi->owner);
916         bdi->owner = owner;
917         get_device(owner);
918         return 0;
919 }
920 EXPORT_SYMBOL(bdi_register_owner);
921
922 /*
923  * Remove bdi from bdi_list, and ensure that it is no longer visible
924  */
925 static void bdi_remove_from_list(struct backing_dev_info *bdi)
926 {
927         spin_lock_bh(&bdi_lock);
928         list_del_rcu(&bdi->bdi_list);
929         spin_unlock_bh(&bdi_lock);
930
931         synchronize_rcu_expedited();
932 }
933
934 void bdi_unregister(struct backing_dev_info *bdi)
935 {
936         /* make sure nobody finds us on the bdi_list anymore */
937         bdi_remove_from_list(bdi);
938         wb_shutdown(&bdi->wb);
939         cgwb_bdi_unregister(bdi);
940
941         if (bdi->dev) {
942                 bdi_debug_unregister(bdi);
943                 device_unregister(bdi->dev);
944                 bdi->dev = NULL;
945         }
946
947         if (bdi->owner) {
948                 put_device(bdi->owner);
949                 bdi->owner = NULL;
950         }
951 }
952
953 static void release_bdi(struct kref *ref)
954 {
955         struct backing_dev_info *bdi =
956                         container_of(ref, struct backing_dev_info, refcnt);
957
958         if (test_bit(WB_registered, &bdi->wb.state))
959                 bdi_unregister(bdi);
960         WARN_ON_ONCE(bdi->dev);
961         wb_exit(&bdi->wb);
962         cgwb_bdi_exit(bdi);
963         kfree(bdi);
964 }
965
966 void bdi_put(struct backing_dev_info *bdi)
967 {
968         kref_put(&bdi->refcnt, release_bdi);
969 }
970 EXPORT_SYMBOL(bdi_put);
971
972 static wait_queue_head_t congestion_wqh[2] = {
973                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
974                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
975         };
976 static atomic_t nr_wb_congested[2];
977
978 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
979 {
980         wait_queue_head_t *wqh = &congestion_wqh[sync];
981         enum wb_congested_state bit;
982
983         bit = sync ? WB_sync_congested : WB_async_congested;
984         if (test_and_clear_bit(bit, &congested->state))
985                 atomic_dec(&nr_wb_congested[sync]);
986         smp_mb__after_atomic();
987         if (waitqueue_active(wqh))
988                 wake_up(wqh);
989 }
990 EXPORT_SYMBOL(clear_wb_congested);
991
992 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
993 {
994         enum wb_congested_state bit;
995
996         bit = sync ? WB_sync_congested : WB_async_congested;
997         if (!test_and_set_bit(bit, &congested->state))
998                 atomic_inc(&nr_wb_congested[sync]);
999 }
1000 EXPORT_SYMBOL(set_wb_congested);
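/*
 * Block drivers normally reach these through the set_bdi_congested() and
 * clear_bdi_congested() wrappers in backing-dev.h, which operate on
 * bdi->wb.congested of the bdi they own, e.g. (sketch, with q standing for
 * the driver's request queue):
 *
 *	set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 */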
1001
1002 /**
1003  * congestion_wait - wait for a backing_dev to become uncongested
1004  * @sync: SYNC or ASYNC IO
1005  * @timeout: timeout in jiffies
1006  *
1007  * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1008  * write congestion.  If no backing_devs are congested then just wait for the
1009  * next write to be completed.
1010  */
1011 long congestion_wait(int sync, long timeout)
1012 {
1013         long ret;
1014         unsigned long start = jiffies;
1015         DEFINE_WAIT(wait);
1016         wait_queue_head_t *wqh = &congestion_wqh[sync];
1017
1018         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1019         ret = io_schedule_timeout(timeout);
1020         finish_wait(wqh, &wait);
1021
1022         trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1023                                         jiffies_to_usecs(jiffies - start));
1024
1025         return ret;
1026 }
1027 EXPORT_SYMBOL(congestion_wait);
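/*
 * Typical caller pattern (e.g. from memory reclaim): back off for up to a
 * tenth of a second while async writeback is congested:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * BLK_RW_SYNC/BLK_RW_ASYNC index the two wait queues above; the return value
 * is whatever io_schedule_timeout() left of the timeout.
 */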
1028
1029 /**
1030  * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested
1031  * @sync: SYNC or ASYNC IO
1032  * @timeout: timeout in jiffies
1033  *
1034  * In the event of a congested backing_dev (any backing_dev) this waits
1035  * for up to @timeout jiffies for either a BDI to exit congestion of the
1036  * given @sync queue or a write to complete.
1037  *
1038  * The return value is 0 if the sleep is for the full timeout. Otherwise,
1039  * it is the number of jiffies that were still remaining when the function
1040  * returned. return_value == timeout implies the function did not sleep.
1041  */
1042 long wait_iff_congested(int sync, long timeout)
1043 {
1044         long ret;
1045         unsigned long start = jiffies;
1046         DEFINE_WAIT(wait);
1047         wait_queue_head_t *wqh = &congestion_wqh[sync];
1048
1049         /*
1050          * If there is no congestion, yield if necessary instead
1051          * of sleeping on the congestion queue
1052          */
1053         if (atomic_read(&nr_wb_congested[sync]) == 0) {
1054                 cond_resched();
1055
1056                 /* In case we scheduled, work out time remaining */
1057                 ret = timeout - (jiffies - start);
1058                 if (ret < 0)
1059                         ret = 0;
1060
1061                 goto out;
1062         }
1063
1064         /* Sleep until uncongested or a write happens */
1065         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1066         ret = io_schedule_timeout(timeout);
1067         finish_wait(wqh, &wait);
1068
1069 out:
1070         trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1071                                         jiffies_to_usecs(jiffies - start));
1072
1073         return ret;
1074 }
1075 EXPORT_SYMBOL(wait_iff_congested);