/* mm/backing-dev.c (platform/kernel/linux-rpi.git) */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
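
/*
 * Illustrative sketch (not part of this file): readers walk bdi_list under
 * RCU while writers take bdi_lock, roughly:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		...;
 *	rcu_read_unlock();
 */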

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
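
/*
 * For illustration only (a rough sketch of the expansion of the macro
 * defined above, not compiled separately): BDI_SHOW(read_ahead_kb,
 * K(bdi->ra_pages)) produces approximately
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *				struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW() then pairs this _show with the read_ahead_kb_store()
 * defined just above.
 */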

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
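
/*
 * Rough sketch (illustrative only): ATTRIBUTE_GROUPS(bdi_dev) expands to
 * something along the lines of
 *
 *	static const struct attribute_group bdi_dev_group = {
 *		.attrs = bdi_dev_attrs,
 *	};
 *	static const struct attribute_group *bdi_dev_groups[] = {
 *		&bdi_dev_group,
 *		NULL,
 *	};
 *
 * which provides the bdi_dev_groups array consumed by bdi_class_init()
 * below.
 */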

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
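
/*
 * Illustrative caller sketch (simplified; the real call site is in
 * __mark_inode_dirty() in fs/fs-writeback.c): once the first dirty inode
 * is queued on a wb, the dirtying path does roughly
 *
 *	if (wakeup_bdi)
 *		wb_wakeup_delayed(wb);
 *
 * so the actual flush only starts about one dirty_writeback_interval later.
 */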

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
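
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * INIT_BW = 100 << (20 - 12) = 100 * 256 = 25600 pages per second,
 * and 25600 pages * 4 KiB = 100 MiB/s, matching the comment above.
 */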

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested) {
		err = -ENOMEM;
		goto out_put_bdi;
	}

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->__bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->__bdi) {
		rb_erase(&congested->rb_node,
			 &congested->__bdi->cgwb_congested_tree);
		congested->__bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
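
/*
 * Illustrative usage sketch (not taken from this file; the real lookup
 * helpers live in include/linux/backing-dev.h and fs/fs-writeback.c).
 * A caller that holds a reference on a memcg css might do roughly:
 *
 *	struct cgroup_subsys_state *memcg_css;
 *	struct bdi_writeback *wb;
 *
 *	memcg_css = task_get_css(current, memory_cgrp_id);
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	css_put(memcg_css);
 *	if (wb) {
 *		... issue cgroup-aware writeback against wb ...
 *		wb_put(wb);
 *	}
 */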

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	mutex_init(&bdi->cgwb_release_mutex);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->__bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register_va);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
	if (rc)
		return rc;
	/* Leaking owner reference... */
	WARN_ON(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	cgwb_bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);
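
/*
 * Illustrative lifecycle sketch (not part of this file; the "mydev%d"
 * name and 'minor' variable are made up). A driver that owns its bdi
 * would typically pair the calls above like this:
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", minor);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */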

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);
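
/*
 * For reference, a sketch of the thin bdi-level wrappers that live in
 * include/linux/backing-dev.h (consult the header for the exact
 * definitions); block-layer code usually flags congestion per bdi via
 * helpers along the lines of:
 *
 *	static inline void set_bdi_congested(struct backing_dev_info *bdi,
 *					     int sync)
 *	{
 *		set_wb_congested(bdi->wb.congested, sync);
 *	}
 *
 * with a matching clear_bdi_congested() calling clear_wb_congested().
 */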

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
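
/*
 * Illustrative usage sketch (typical of reclaim/throttling callers such as
 * mm/vmscan.c; the exact call sites differ): back off briefly when async
 * writeback is congested:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */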

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @pgdat has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of pgdat congestion, this calls cond_resched() to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",
		     table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}