diff --git a/block/blk-core.c b/block/blk-core.c
index 3508751..ea70e6c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -349,11 +349,13 @@ EXPORT_SYMBOL(blk_put_queue);
 /**
  * blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
- * Drain ELV_PRIV requests from @q.  The caller is responsible for ensuring
- * that no new requests which need to be drained are queued.
+ * Drain requests from @q.  If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained.  The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q)
+void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
        while (true) {
                int nr_rqs;
@@ -361,9 +363,15 @@ void blk_drain_queue(struct request_queue *q)
                spin_lock_irq(q->queue_lock);
 
                elv_drain_elevator(q);
+               if (drain_all)
+                       blk_throtl_drain(q);
 
                __blk_run_queue(q);
-               nr_rqs = q->rq.elvpriv;
+
+               if (drain_all)
+                       nr_rqs = q->rq.count[0] + q->rq.count[1];
+               else
+                       nr_rqs = q->rq.elvpriv;
 
                spin_unlock_irq(q->queue_lock);
 
@@ -373,30 +381,45 @@ void blk_drain_queue(struct request_queue *q)
        }
 }
 
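The loop above is a poll-until-quiescent pattern: kick the queue and snapshot the outstanding-request count under the lock, then sleep outside the lock before rechecking. A minimal sketch of the same shape, where queue_is_idle() is a hypothetical stand-in for the q->rq counts, not a real blk-core helper:

#include <linux/blkdev.h>
#include <linux/delay.h>

static void drain_until_idle(struct request_queue *q)
{
        while (true) {
                bool idle;

                spin_lock_irq(q->queue_lock);
                __blk_run_queue(q);             /* kick pending work */
                idle = queue_is_idle(q);        /* hypothetical: rq counts */
                spin_unlock_irq(q->queue_lock);

                if (idle)
                        break;
                msleep(10);                     /* back off, then recheck */
        }
}

Snapshotting the count under the lock and sleeping outside it keeps the lock hold time bounded while still guaranteeing a consistent read of the counters.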
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it.  All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-       /*
-        * We know we have process context here, so we can be a little
-        * cautious and ensure that pending block actions on this device
-        * are done before moving on. Going into this function, we should
-        * not have processes doing IO to this device.
-        */
-       blk_sync_queue(q);
+       spinlock_t *lock = q->queue_lock;
 
-       del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+       /* mark @q DEAD, no new requests or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-       mutex_unlock(&q->sysfs_lock);
+
+       spin_lock_irq(lock);
+       queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+       queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+       queue_flag_set(QUEUE_FLAG_DEAD, q);
 
        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;
 
+       spin_unlock_irq(lock);
+       mutex_unlock(&q->sysfs_lock);
+
+       /*
+        * Drain all requests queued before DEAD marking.  The caller might
+        * be trying to tear down @q before its elevator is initialized, in
+        * which case we don't want to call into draining.
+        */
+       if (q->elevator)
+               blk_drain_queue(q, true);
+
+       /* @q won't process any more requests, flush async actions */
+       del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+       blk_sync_queue(q);
+
+       /* @q is and will stay empty; shut it down and put it */
        blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
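From a driver's perspective, the new ordering makes a single blk_cleanup_queue() call a complete teardown. A hedged sketch, where struct mydrv and its fields are hypothetical driver state, not anything defined by blk-core:

#include <linux/blkdev.h>
#include <linux/slab.h>

struct mydrv {                          /* hypothetical driver state */
        struct request_queue *queue;    /* set up via blk_init_queue() */
        spinlock_t lock;                /* driver-supplied queue lock */
};

static void mydrv_remove(struct mydrv *drv)
{
        /*
         * Marks the queue DEAD (new requests fail with -ENODEV),
         * drains everything queued before the marking, and puts the
         * queue.  Because blk_cleanup_queue() switches the queue over
         * to its internal __queue_lock, drv->lock (and drv itself)
         * may be freed as soon as this returns.
         */
        blk_cleanup_queue(drv->queue);
        kfree(drv);
}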
@@ -1203,18 +1226,32 @@ static bool bio_attempt_front_merge(struct request_queue *q,
        return true;
 }
 
-/*
- * Attempts to merge with the plugged list in the current process. Returns
- * true if merge was successful, otherwise false.
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list.  Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * This function is called without @q->queue_lock; however, elevator is
+ * accessed iff there already are requests on the plugged list, which in
+ * turn guarantees validity of the elevator.
+ *
+ * Note that, on successful merge, elevator operation
+ * elevator_bio_merged_fn() will be called without queue lock.  Elevator
+ * must be ready for this.
  */
-static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-                              struct bio *bio, unsigned int *request_count)
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+                              unsigned int *request_count)
 {
        struct blk_plug *plug;
        struct request *rq;
        bool ret = false;
 
-       plug = tsk->plug;
+       plug = current->plug;
        if (!plug)
                goto out;
        *request_count = 0;
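For context, the plugged list consulted here is per-task and is set up by the submitter. A hedged sketch of that side, where make_one_bio() is a hypothetical bio constructor standing in for whatever builds the I/O:

static void submit_batch(struct block_device *bdev, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);  /* current->plug = &plug */
        for (i = 0; i < nr; i++)
                /* each bio is first offered to attempt_plug_merge() */
                submit_bio(WRITE, make_one_bio(bdev, i));
        blk_finish_plug(&plug); /* flush plug.list to the queue(s) */
}

Because the plug belongs to %current, dropping the @tsk argument loses nothing; the function could never be safely used on another task's plug anyway.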
@@ -1244,7 +1281,6 @@ out:
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-       req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
@@ -1282,7 +1318,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (attempt_plug_merge(current, q, bio, &request_count))
+       if (attempt_plug_merge(q, bio, &request_count))
                return;
 
        spin_lock_irq(q->queue_lock);
@@ -1330,8 +1366,7 @@ get_rq:
         */
        init_request_from_bio(req, bio);
 
-       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-           bio_flagged(bio, BIO_CPU_AFFINE))
+       if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
                req->cpu = raw_smp_processor_id();
 
        plug = current->plug;
@@ -1344,15 +1379,19 @@ get_rq:
                 */
                if (list_empty(&plug->list))
                        trace_block_plug(q);
-               else if (!plug->should_sort) {
-                       struct request *__rq;
+               else {
+                       if (!plug->should_sort) {
+                               struct request *__rq;
 
-                       __rq = list_entry_rq(plug->list.prev);
-                       if (__rq->q != q)
-                               plug->should_sort = 1;
+                               __rq = list_entry_rq(plug->list.prev);
+                               if (__rq->q != q)
+                                       plug->should_sort = 1;
+                       }
+                       if (request_count >= BLK_MAX_REQUEST_COUNT) {
+                               blk_flush_plug_list(plug, false);
+                               trace_block_plug(q);
+                       }
                }
-               if (request_count >= BLK_MAX_REQUEST_COUNT)
-                       blk_flush_plug_list(plug, false);
                list_add_tail(&req->queuelist, &plug->list);
                drive_stat_acct(req, 1);
        } else {
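Two behaviors fall out of the restructured branch: requests destined for different queues mark the list for sorting, and a list that reaches BLK_MAX_REQUEST_COUNT is flushed early with a fresh block_plug trace event. A hedged sketch of the sort trigger, where bdev_a/bdev_b and make_one_bio() are hypothetical:

static void submit_two_devices(struct block_device *bdev_a,
                               struct block_device *bdev_b)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        submit_bio(WRITE, make_one_bio(bdev_a, 0));  /* list tail on q_a */
        /* different queue than the tail: sets plug.should_sort, so the
         * eventual blk_flush_plug_list() sorts and dispatches the list
         * in per-queue batches */
        submit_bio(WRITE, make_one_bio(bdev_b, 0));
        blk_finish_plug(&plug);
}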
@@ -1495,9 +1534,6 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-               goto end_io;
-
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
@@ -1742,6 +1778,8 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                where = ELEVATOR_INSERT_FLUSH;
 
        add_acct_request(q, rq, where);
+       if (where == ELEVATOR_INSERT_FLUSH)
+               __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
        return 0;
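The added __blk_run_queue() matters because flush requests bypass the elevator: an ELEVATOR_INSERT_FLUSH insertion would otherwise sit unissued until something else happened to kick the queue. A hedged sketch of a request-based stacking path (dm-style); the clone is assumed to have been prepared elsewhere:

static void dispatch_clone(struct request_queue *bottom_q,
                           struct request *clone)
{
        /* For REQ_FLUSH/REQ_FUA clones this now also runs @bottom_q
         * immediately instead of relying on a later queue kick. */
        if (blk_insert_cloned_request(bottom_q, clone))
                pr_err("blk_insert_cloned_request failed\n");
}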