	spin_unlock_bh(&wb->work_lock);
}
-static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				 bool range_cyclic, enum wb_reason reason)
-{
-	struct wb_writeback_work *work;
-
-	if (!wb_has_dirty_io(wb))
-		return;
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		trace_writeback_nowork(wb->bdi);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode = WB_SYNC_NONE;
-	work->nr_pages = nr_pages;
-	work->range_cyclic = range_cyclic;
-	work->reason = reason;
-
-	wb_queue_work(wb, work);
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
/**
#endif /* CONFIG_CGROUP_WRITEBACK */
-/**
- * bdi_start_writeback - start writeback
- * @bdi: the backing device to write from
- * @nr_pages: the number of pages to write
- * @reason: reason why some writeback work was initiated
- *
- * Description:
- * This does WB_SYNC_NONE opportunistic writeback. The IO is only
- * started when this function returns, we make no guarantees on
- * completion. Caller need not hold sb s_umount semaphore.
- *
- */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason)
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason)
 {
-	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
+	struct wb_writeback_work *work;
+
+	if (!wb_has_dirty_io(wb))
+		return;
+
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		trace_writeback_nowork(wb->bdi);
+		wb_wakeup(wb);
+		return;
+	}
+
+	work->sync_mode = WB_SYNC_NONE;
+	work->nr_pages = nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->reason = reason;
+
+	wb_queue_work(wb, work);
}
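
Note on caller conversion: the WB_SYNC_NONE, no-completion-guarantee semantics described in the removed bdi_start_writeback() kernel-doc carry over unchanged; only the entry point moves from the backing_dev_info to its embedded bdi_writeback. A minimal sketch of how a former bdi_start_writeback(bdi, nr_pages, reason) call site would be converted (hypothetical caller, not part of this patch; the removed wrapper always passed range_cyclic == true):

	/* was: bdi_start_writeback(bdi, nr_pages, reason); */
	wb_start_writeback(&bdi->wb, nr_pages, true, reason);
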
/**
	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
+		wb_start_writeback(&bdi->wb, nr_pages, false, reason);
	rcu_read_unlock();
}
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);
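
For a caller that still only holds a struct backing_dev_info, the removed wrapper can be reproduced locally on top of the new declaration. A hedged sketch; the helper name is made up for illustration and is not part of this patch:

	#include <linux/backing-dev.h>

	/* hypothetical local stand-in for the removed bdi_start_writeback() */
	static inline void example_bdi_start_writeback(struct backing_dev_info *bdi,
						       long nr_pages,
						       enum wb_reason reason)
	{
		/* old wrapper behaviour: write against the bdi's embedded wb,
		 * cyclic range, WB_SYNC_NONE */
		wb_start_writeback(&bdi->wb, nr_pages, true, reason);
	}
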