// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/operations.rst
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

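/*
 * Overview of the machinery in this file:
 *
 *  - Each operation carries a usage count (op->usage); when the last ref is
 *    dropped, fscache_put_operation() releases it, deferring the final
 *    cleanup to the cache's op_gc work item if the object lock can't be
 *    taken immediately.
 *
 *  - An operation progresses INITIALISED -> PENDING -> IN_PROGRESS ->
 *    COMPLETE, may be diverted to CANCELLED along the way, and ends up DEAD
 *    once released.
 *
 *  - The owning object tracks n_ops, n_in_progress and n_exclusive so that
 *    exclusive operations can be serialised against ordinary ones.
 */
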
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

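/*
 * Default cancellation handler, installed when the caller doesn't supply
 * one so that op->cancel() can always be called unconditionally.
 */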
static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @cookie: The cookie to operate on
 * @op: The operation to initialise
 * @processor: The function to perform the operation
 * @cancel: A function to handle operation cancellation
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set the
 * flags and the object if needed.
 */
void fscache_operation_init(struct fscache_cookie *cookie,
			    struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
	trace_fscache_op(cookie, op, fscache_op_init);
}
EXPORT_SYMBOL(fscache_operation_init);

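/*
 * Illustrative sketch of how an operation is typically set up and submitted
 * (my_op_processor and my_op_release are hypothetical caller-supplied
 * handlers; real callers also pick the backing object under cookie->lock):
 *
 *	struct fscache_operation *op;
 *
 *	op = kzalloc(sizeof(*op), GFP_NOIO);
 *	if (op) {
 *		fscache_operation_init(cookie, op, my_op_processor,
 *				       NULL, my_op_release);
 *		op->flags = FSCACHE_OP_ASYNC | (1UL << FSCACHE_OP_WAITING);
 *		if (fscache_submit_op(object, op) < 0)
 *			fscache_put_operation(op);
 *	}
 */
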
/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation for as long as it is queued.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	struct fscache_cookie *cookie = op->object->cookie;

	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

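/*
 * Start an op running: move it from PENDING to IN_PROGRESS, wake anyone
 * waiting on FSCACHE_OP_WAITING and, if it has a processor, hand it to the
 * thread pool.  The caller must hold object->lock.
 */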
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	else
		trace_fscache_op(object->cookie, op, fscache_op_run);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 * - returns 0 if the op was queued or started, -ENOBUFS if the object can't
 *   take it and -EIO if the cache is broken
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * submit an operation for an object
 * - ops may be submitted only while the object is in one of the following
 *   states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 * - returns 0 if the op was queued or started, -ENOBUFS if it was rejected
 *   and -EIO if the cache is broken
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(object->cookie, op, fscache_op_submit);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

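/*
 * Note for both submission paths above: queueing an op on ->pending_ops
 * takes an extra reference on it; that reference is dropped again by
 * fscache_start_operations() once the op is started, or by
 * fscache_cancel_op()/fscache_cancel_all_ops() if it never runs.
 */
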
/*
 * queue an object for withdrawal on error, aborting all following
 * asynchronous operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

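/*
 * Note: an exclusive op at the head of the pending queue is only started
 * once object->n_in_progress has dropped to zero, and setting "stop" above
 * prevents anything queued behind it from being started in the same pass.
 */
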
/*
 * cancel an operation that's pending on an object
 * - an op that is already in progress is only cancelled if the caller
 *   passes cancel_in_progress_op == true
 * - returns 0 if the op was cancelled or -EBUSY if it could not be
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_cancel);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		trace_fscache_op(object->cookie, op, fscache_op_cancel_all);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

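/*
 * Note: cond_resched_lock() above drops object->lock and reschedules if
 * necessary, so a long pending queue can be drained without holding the
 * lock or the CPU for too long.
 */
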
/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		trace_fscache_op(object->cookie, op, fscache_op_completed);
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		trace_fscache_op(object->cookie, op, fscache_op_cancelled);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);

/*
 * release an operation
 * - the op is freed once its last reference is dropped; if it was the
 *   object's last op, the object is sent FSCACHE_OBJECT_EV_CLEARED
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

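/*
 * Deferred releases queued by fscache_put_operation() are finished off by
 * fscache_operation_gc() below, run from the cache's op_gc work item: it
 * completes the object bookkeeping and frees the op outside the caller's
 * locking context.
 */
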
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		trace_fscache_op(object->cookie, op, fscache_op_gc);

		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the enqueue path took a ref on the op, which is dropped here once the
 * processor has run
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(op->object->cookie, op, fscache_op_work);

	ASSERT(op->processor != NULL);
	op->processor(op);
	fscache_put_operation(op);

	_leave("");
}