From: Linus Torvalds
Date: Sat, 7 Aug 2010 19:42:58 +0000 (-0700)
Subject: Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
X-Git-Tag: v3.0~4085
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3b7433b8a8a83c87972065b1852b7dcae691e464;p=platform%2Fkernel%2Flinux-amlogic.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (55 commits)
  workqueue: mark init_workqueues() as early_initcall()
  workqueue: explain for_each_*cwq_cpu() iterators
  fscache: fix build on !CONFIG_SYSCTL
  slow-work: kill it
  gfs2: use workqueue instead of slow-work
  drm: use workqueue instead of slow-work
  cifs: use workqueue instead of slow-work
  fscache: drop references to slow-work
  fscache: convert operation to use workqueue instead of slow-work
  fscache: convert object to use workqueue instead of slow-work
  workqueue: fix how cpu number is stored in work->data
  workqueue: fix mayday_mask handling on UP
  workqueue: fix build problem on !CONFIG_SMP
  workqueue: fix locking in retry path of maybe_create_worker()
  async: use workqueue for worker pool
  workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
  workqueue: implement unbound workqueue
  workqueue: prepare for WQ_UNBOUND implementation
  libata: take advantage of cmwq and remove concurrency limitations
  workqueue: fix worker management invocation without pending works
  ...

Fixed up conflicts in fs/cifs/* as per Tejun. Other trivial conflicts in
include/linux/workqueue.h, kernel/trace/Kconfig and kernel/workqueue.c
---

3b7433b8a8a83c87972065b1852b7dcae691e464
diff --cc fs/cifs/cifsglob.h
index 5990614,f5a1f9b..0cdfb8c
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@@ -728,6 -732,6 +728,10 @@@ GLOBAL_EXTERN unsigned int cifs_min_rcv
  GLOBAL_EXTERN unsigned int cifs_min_small;	/* min size of small buf pool */
  GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
  
+ void cifs_oplock_break(struct work_struct *work);
+ void cifs_oplock_break_get(struct cifsFileInfo *cfile);
+ void cifs_oplock_break_put(struct cifsFileInfo *cfile);
++
 +extern const struct slow_work_ops cifs_oplock_break_ops;
 +
 +#endif	/* _CIFS_GLOB_H */
diff --cc fs/cifs/file.c
index fa04a00d,e767bfa..db11fde
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@@ -2291,24 -2295,7 +2291,23 @@@ out
  	return rc;
  }
  
 +static int cifs_release_page(struct page *page, gfp_t gfp)
 +{
 +	if (PagePrivate(page))
 +		return 0;
 +
 +	return cifs_fscache_release_page(page, gfp);
 +}
 +
 +static void cifs_invalidate_page(struct page *page, unsigned long offset)
 +{
 +	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 +
 +	if (offset == 0)
 +		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 +}
 +
- static void
- cifs_oplock_break(struct slow_work *work)
+ void cifs_oplock_break(struct work_struct *work)
  {
  	struct cifsFileInfo *cfile = container_of(work,
  						  struct cifsFileInfo,
  						  oplock_break);
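The cifs hunks above show the shape of the slow-work to workqueue conversion
performed throughout this series. A minimal sketch of that pattern, assuming
hypothetical myobj/myobj_break names (container_of(), INIT_WORK() and
queue_work() are the real APIs; system_nrt_wq is declared in the
kernel/workqueue.c hunk further down):

	#include <linux/workqueue.h>

	struct myobj {
		struct work_struct work;	/* was: struct slow_work work; */
	};

	/* The handler now takes a work_struct and recovers its object with
	 * container_of(), exactly as cifs_oplock_break() does above. */
	static void myobj_break(struct work_struct *work)
	{
		struct myobj *obj = container_of(work, struct myobj, work);
		/* ... processing that used to run as a slow-work item ... */
	}

	static void myobj_queue(struct myobj *obj)
	{
		INIT_WORK(&obj->work, myobj_break);
		/* slow_work_enqueue() becomes queue_work() on one of the new
		 * system workqueues; the non-reentrant one is assumed here. */
		queue_work(system_nrt_wq, &obj->work);
	}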
diff --cc include/linux/workqueue.h
index d0f7c81,51dc9a7..4f9d277b
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@@ -298,7 -395,10 +395,14 @@@ static inline long work_on_cpu(unsigne
  long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
  #endif /* CONFIG_SMP */
  
+ #ifdef CONFIG_FREEZER
+ extern void freeze_workqueues_begin(void);
+ extern bool freeze_workqueues_busy(void);
+ extern void thaw_workqueues(void);
+ #endif /* CONFIG_FREEZER */
+ 
 +#ifdef CONFIG_LOCKDEP
 +int in_workqueue_context(struct workqueue_struct *wq);
 +#endif
++
  #endif
diff --cc kernel/trace/Kconfig
index 6eb97bb,a0d95c1..538501c
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@@ -323,17 -371,26 +323,6 @@@ config STACK_TRACE
  	  Say N if unsure.
  
- config WORKQUEUE_TRACER
- 	bool "Trace workqueues"
 -config KMEMTRACE
 -	bool "Trace SLAB allocations"
--	select GENERIC_TRACER
--	help
- 	  The workqueue tracer provides some statistical information
- 	  about each cpu workqueue thread such as the number of the
- 	  works inserted and executed since their creation. It can help
- 	  to evaluate the amount of work each of them has to perform.
- 	  For example it can help a developer to decide whether he should
- 	  choose a per-cpu workqueue instead of a singlethreaded one.
 -	  kmemtrace provides tracing for slab allocator functions, such as
 -	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
 -	  data is then fed to the userspace application in order to analyse
 -	  allocation hotspots, internal fragmentation and so on, making it
 -	  possible to see how well an allocator performs, as well as debug
 -	  and profile kernel code.
 -
 -	  This requires an userspace application to use. See
 -	  Documentation/trace/kmemtrace.txt for more information.
 -
 -	  Saying Y will make the kernel somewhat larger and slower. However,
 -	  if you disable kmemtrace at run-time or boot-time, the performance
 -	  impact is minimal (depending on the arch the kernel is built for).
 -
 -	  If unsure, say N.
--
  config BLK_DEV_IO_TRACE
  	bool "Support for tracing block IO actions"
  	depends on SYSFS
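The CONFIG_FREEZER declarations added to include/linux/workqueue.h above are
the hooks through which the suspend/hibernate freezer quiesces freezable
workqueues. A rough sketch of the intended calling sequence, assuming a
hypothetical caller on the PM path rather than the actual freezer code:

	#include <linux/workqueue.h>
	#include <linux/delay.h>

	static void quiesce_workqueues(void)
	{
		/* Stop freezable workqueues from starting new work items. */
		freeze_workqueues_begin();
		/* Wait for work items already executing to drain. */
		while (freeze_workqueues_busy())
			msleep(10);
	}

	static void unquiesce_workqueues(void)
	{
		/* On resume, let frozen workqueues process work again. */
		thaw_workqueues();
	}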
diff --cc kernel/workqueue.c
index 59fef15,e2eb351..9ca34cd
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@@ -68,21 -237,68 +237,83 @@@ struct workqueue_struct 
  #endif
  };
  
+ struct workqueue_struct *system_wq __read_mostly;
+ struct workqueue_struct *system_long_wq __read_mostly;
+ struct workqueue_struct *system_nrt_wq __read_mostly;
+ struct workqueue_struct *system_unbound_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_wq);
+ EXPORT_SYMBOL_GPL(system_long_wq);
+ EXPORT_SYMBOL_GPL(system_nrt_wq);
+ EXPORT_SYMBOL_GPL(system_unbound_wq);
+ 
+ #define for_each_busy_worker(worker, i, pos, gcwq)			\
+ 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
+ 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+ 
+ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
+ 				  unsigned int sw)
+ {
+ 	if (cpu < nr_cpu_ids) {
+ 		if (sw & 1) {
+ 			cpu = cpumask_next(cpu, mask);
+ 			if (cpu < nr_cpu_ids)
+ 				return cpu;
+ 		}
+ 		if (sw & 2)
+ 			return WORK_CPU_UNBOUND;
+ 	}
+ 	return WORK_CPU_NONE;
+ }
+ 
+ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+ 				struct workqueue_struct *wq)
+ {
+ 	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+ }
+ 
+ /*
+  * CPU iterators
+  *
+  * An extra gcwq is defined for an invalid cpu number
+  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
+  * specific CPU.  The following iterators are similar to
+  * for_each_*_cpu() iterators but also considers the unbound gcwq.
+  *
+  * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
+  * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
+  * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
+  *				  WORK_CPU_UNBOUND for unbound workqueues
+  */
+ #define for_each_gcwq_cpu(cpu)						\
+ 	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
+ 	     (cpu) < WORK_CPU_NONE;					\
+ 	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+ 
+ #define for_each_online_gcwq_cpu(cpu)					\
+ 	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
+ 	     (cpu) < WORK_CPU_NONE;					\
+ 	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+ 
+ #define for_each_cwq_cpu(cpu, wq)					\
+ 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
+ 	     (cpu) < WORK_CPU_NONE;					\
+ 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+ 
 +#ifdef CONFIG_LOCKDEP
 +/**
 + * in_workqueue_context() - in context of specified workqueue?
 + * @wq: the workqueue of interest
 + *
 + * Checks lockdep state to see if the current task is executing from
 + * within a workqueue item.  This function exists only if lockdep is
 + * enabled.
 + */
 +int in_workqueue_context(struct workqueue_struct *wq)
 +{
 +	return lock_is_held(&wq->lockdep_map);
 +}
 +#endif
 +
  #ifdef CONFIG_DEBUG_OBJECTS_WORK
  static struct debug_obj_descr work_debug_descr;
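Two short usage sketches for the kernel/workqueue.c additions above. The
iterators, WORK_CPU_UNBOUND and in_workqueue_context() are the symbols from
the hunk; my_wq and the surrounding functions are assumed for illustration,
and since the gcwq iterators are local to kernel/workqueue.c this is how code
inside that file would use them:

	/* Walk every gcwq "CPU", including the pseudo WORK_CPU_UNBOUND slot
	 * that hosts unbound workqueues. */
	static void inspect_gcwqs(void)
	{
		unsigned int cpu;

		for_each_gcwq_cpu(cpu) {
			if (cpu == WORK_CPU_UNBOUND)
				pr_info("unbound gcwq\n");
			else
				pr_info("gcwq for cpu %u\n", cpu);
		}
	}

	/* Lockdep-only sanity check: complain if a helper that must run from
	 * a given workqueue is invoked from anywhere else. */
	static void my_helper(struct workqueue_struct *my_wq)
	{
	#ifdef CONFIG_LOCKDEP
		WARN_ON_ONCE(!in_workqueue_context(my_wq));
	#endif
		/* ... */
	}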