/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

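/*
 * Spread inputs evenly across queues: for example, with nr_cpus = 8 and
 * nr_queues = 2, cpu values 0-3 yield queue 0 and values 4-7 yield
 * queue 1.
 */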
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
                              const int cpu)
{
        return cpu * nr_queues / nr_cpus;
}

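/*
 * Return the first CPU in @cpu's thread-sibling (hyperthread) mask, or
 * @cpu itself if the topology reports no valid sibling.
 */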
static int get_first_sibling(unsigned int cpu)
{
        unsigned int ret;

        ret = cpumask_first(topology_sibling_cpumask(cpu));
        if (ret < nr_cpu_ids)
                return ret;

        return cpu;
}

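/*
 * Fill @map (indexed by CPU number) with a hardware queue index for every
 * possible CPU.  The first pass counts the online CPUs and the distinct
 * cores among them; the second pass assigns queues, grouping thread
 * siblings onto one queue when there are fewer queues than CPUs.
 * Returns 0 on success, nonzero if the temporary cpumask cannot be
 * allocated.
 */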
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
                            const struct cpumask *online_mask)
{
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;

        if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
                return 1;

        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
        for_each_cpu(i, online_mask) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
                        nr_uniq_cpus++;
                cpumask_set_cpu(i, cpus);
        }

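        /*
         * Assignment pass: walk every possible CPU and pick a queue for
         * it, leaving offline CPUs on queue 0.
         */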
        queue = 0;
        for_each_possible_cpu(i) {
                if (!cpumask_test_cpu(i, online_mask)) {
                        map[i] = 0;
                        continue;
                }

                /*
                 * Easy case: we have as many (or more) hardware queues
                 * as CPUs, or there are no thread siblings to account
                 * for.  Map 1:1, or spread CPUs evenly if there are
                 * fewer queues.
                 */
                if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
                        map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
                        queue++;
                        continue;
                }

                /*
                 * Fewer than nr_cpus queues, and some CPUs share a core
                 * (thread siblings).  Map all siblings of a core to the
                 * same queue.
                 */
                first_sibling = get_first_sibling(i);
                if (first_sibling == i) {
                        map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
                                                    queue);
                        queue++;
                } else {
                        map[i] = map[first_sibling];
                }
        }

        free_cpumask_var(cpus);
        return 0;
}

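/*
 * Allocate and build the CPU -> hardware queue map for @set.  The array
 * has nr_cpu_ids entries; entry i holds the hardware queue index that
 * services CPU i, with offline CPUs mapped to the first hctx (queue 0).
 *
 * A sketch of the intended call site (an assumption based on how blk-mq
 * consumes the map; the actual caller lives in blk-mq.c):
 *
 *        set->mq_map = blk_mq_make_queue_map(set);
 *        if (!set->mq_map)
 *                return -ENOMEM;
 */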
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
        unsigned int *map;

        /* If cpus are offline, map them to first hctx */
        map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
                           set->numa_node);
        if (!map)
                return NULL;

        if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
                return map;

        kfree(map);
        return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
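/*
 * Example (hypothetical, for illustration only): the returned node lets
 * per-hctx memory be placed close to the CPUs the queue serves:
 *
 *        node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
 *        hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
 */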
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == mq_map[i])
                        return local_memory_node(cpu_to_node(i));
        }

        return NUMA_NO_NODE;
}