blk-cgroup: pass a gendisk to blkcg_schedule_throttle
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 21 Sep 2022 18:05:00 +0000 (20:05 +0200)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Tue, 27 Sep 2022 01:17:28 +0000 (19:17 -0600)
Pass the gendisk to blkcg_schedule_throttle as part of moving the
blk-cgroup infrastructure to be gendisk based.  Remove the unused
!BLK_CGROUP stub while we're at it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
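
To make the interface change concrete, here is a minimal sketch of how a caller adapts; my_throttle_path() and its parameters are hypothetical, only blkcg_schedule_throttle(), q->disk and bdev->bd_disk come from the patch itself:

	#include <linux/blkdev.h>
	#include <linux/blk-cgroup.h>

	/* Hypothetical caller showing the old and new calling conventions. */
	static void my_throttle_path(struct request_queue *q,
				     struct block_device *bdev)
	{
		/* before: blkcg_schedule_throttle(q, true); */

		/* after: pass the gendisk.  A caller holding a request_queue
		 * uses q->disk, one holding a block_device uses bdev->bd_disk,
		 * mirroring the blk-iocost/blk-iolatency and swapfile hunks
		 * below.
		 */
		blkcg_schedule_throttle(q->disk, true);
		blkcg_schedule_throttle(bdev->bd_disk, false);
	}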
block/blk-cgroup.c
block/blk-iocost.c
block/blk-iolatency.c
include/linux/blk-cgroup.h
mm/swapfile.c

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c2d5ca2..fc82057 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1792,13 +1792,13 @@ out:
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q: the request queue IO was submitted on
+ * @disk: disk to throttle
  * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task.  We do not pass the blkg because there are places
  * we call this that may not have that information, the swapping code for
- * instance will only have a request_queue at that point.  This set's the
+ * instance will only have a block_device at that point.  This sets the
  * notify_resume for the task to check and see if it requires throttling before
  * returning to user space.
  *
@@ -1807,8 +1807,10 @@ out:
  * throttle once.  If the task needs to be throttled again it'll need to be
  * re-set at the next time we see the task.
  */
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
 {
+       struct request_queue *q = disk->queue;
+
        if (unlikely(current->flags & PF_KTHREAD))
                return;
 
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index c0f69bc..4953964 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2636,7 +2636,7 @@ retry_lock:
        if (use_debt) {
                iocg_incur_debt(iocg, abs_cost, &now);
                if (iocg_kick_delay(iocg, &now))
-                       blkcg_schedule_throttle(rqos->q,
+                       blkcg_schedule_throttle(rqos->q->disk,
                                        (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                iocg_unlock(iocg, ioc_locked, &flags);
                return;
@@ -2737,7 +2737,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
        if (likely(!list_empty(&iocg->active_list))) {
                iocg_incur_debt(iocg, abs_cost, &now);
                if (iocg_kick_delay(iocg, &now))
-                       blkcg_schedule_throttle(rqos->q,
+                       blkcg_schedule_throttle(rqos->q->disk,
                                        (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
        } else {
                iocg_commit_bio(iocg, bio, abs_cost, cost);
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index c6f61fe..571fa95 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
 
        if (use_delay)
-               blkcg_schedule_throttle(rqos->q, use_memdelay);
+               blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
 
        /*
         * To avoid priority inversions we want to just take a slot if we are
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 9f40dbc..dd5841a 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
 
 struct bio;
 struct cgroup_subsys_state;
-struct request_queue;
+struct gendisk;
 
 #define FC_APPID_LEN              129
 
 #ifdef CONFIG_BLK_CGROUP
 extern struct cgroup_subsys_state * const blkcg_root_css;
 
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
 bool blk_cgroup_congested(void);
 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
@@ -39,7 +39,6 @@ struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
 
 static inline void blkcg_maybe_throttle_current(void) { }
 static inline bool blk_cgroup_congested(void) { return false; }
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
 static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
 {
        return NULL;
 }
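
The header hunk above also deletes the !CONFIG_BLK_CGROUP stub, so blkcg_schedule_throttle() is only declared when the controller is built in. A hedged sketch of what that implies for a hypothetical out-of-tree call site (the in-tree callers touched here are only compiled with CONFIG_BLK_CGROUP enabled, which is why the stub was unused):

	#include <linux/blkdev.h>
	#include <linux/blk-cgroup.h>

	/* Hypothetical call site that can be built without blk-cgroup:
	 * with the no-op stub gone it must carry its own guard.
	 */
	static void maybe_throttle(struct block_device *bdev)
	{
	#ifdef CONFIG_BLK_CGROUP
		blkcg_schedule_throttle(bdev->bd_disk, true);
	#endif
	}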
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1fdccd2..82e6200 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3655,7 +3655,7 @@ void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
        plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
                                  avail_lists[nid]) {
                if (si->bdev) {
-                       blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
+                       blkcg_schedule_throttle(si->bdev->bd_disk, true);
                        break;
                }
        }
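
For reference on the swapfile hunk: bdev_get_queue() is a thin wrapper returning bdev->bd_disk->queue, so the swap path already had the gendisk in hand; the new call hands it over directly and blkcg_schedule_throttle() derives the queue itself, as the blk-cgroup.c hunk shows. A small sketch (throttle_swap_device() is hypothetical):

	#include <linux/blkdev.h>
	#include <linux/blk-cgroup.h>

	/* Hypothetical helper contrasting the two call styles. */
	static void throttle_swap_device(struct block_device *bdev)
	{
		/* old: blkcg_schedule_throttle(bdev_get_queue(bdev), true);
		 * where bdev_get_queue(bdev) is bdev->bd_disk->queue
		 */

		/* new: pass the disk; the throttle code looks up disk->queue
		 * internally where it still needs the request_queue.
		 */
		blkcg_schedule_throttle(bdev->bd_disk, true);
	}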