/*
 * drivers/base/cpu.c - CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>

#include "base.h"

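/* Per-CPU pointers to the registered CPU devices, set up by register_cpu(). */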
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
        /* ACPI style match is the only one that may succeed. */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
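/*
 * Move a CPU's sysfs linkage from one NUMA node to another and record the
 * new node in the cpu structure.
 */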
static void change_cpu_under_node(struct cpu *cpu,
                        unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;
        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

static int __ref cpu_subsys_online(struct device *dev)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
        int ret = -ENODEV;

        cpu_hotplug_driver_lock();

        from_nid = cpu_to_node(cpuid);
        if (from_nid == NUMA_NO_NODE)
                goto out;

        ret = cpu_up(cpuid);
        /*
         * When memory is hot-added to a memoryless node and a cpu on that
         * node is brought online, the cpu's node number may change
         * internally, so re-read it and update the sysfs linkage if needed.
         */
        to_nid = cpu_to_node(cpuid);
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);

 out:
        cpu_hotplug_driver_unlock();
        return ret;
}

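/* Bus ->offline() callback: bring the CPU down under the hotplug driver lock. */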
static int cpu_subsys_offline(struct device *dev)
{
        int ret;

        cpu_hotplug_driver_lock();
        ret = cpu_down(dev->id);
        cpu_hotplug_driver_unlock();
        return ret;
}

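/**
 * unregister_cpu - remove the sysfs device for a CPU
 * @cpu: the CPU structure previously registered with register_cpu()
 */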
void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
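/*
 * "probe" and "release" files let user space ask the architecture code to
 * add or remove a CPU; the written string is passed straight through.
 */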
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
        .match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
        .online = cpu_subsys_online,
        .offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

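/*
 * Expose the physical address of this CPU's crash notes buffer so that
 * user space (e.g. kexec/kdump tooling) can locate it.
 */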
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another cpu's data, depending on which cpu the
         * reading thread is scheduled on.  But per-cpu data (memory) is
         * allocated once during boot and does not change thereafter, so the
         * operation is safe and no locking is required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
        &dev_attr_crash_notes.attr,
        &dev_attr_crash_notes_size.attr,
        NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
        .attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
        NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
        &crash_note_cpu_attr_group,
#endif
        NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

/**
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu: if the hotpluggable field is set to 1, a control file is created
 *       in sysfs for this CPU.
 * @num: CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        cpu->dev.offline_disabled = !cpu->hotpluggable;
        cpu->dev.offline = !cpu_online(num);
        cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        cpu->dev.groups = common_cpu_attr_groups;
        if (cpu->hotpluggable)
                cpu->dev.groups = hotplugable_cpu_attr_groups;
        error = device_register(&cpu->dev);
        if (!error) {
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
                register_cpu_under_node(num, cpu_to_node(num));
        }

        return error;
}

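/**
 * get_cpu_device - get the sysfs device registered for a CPU
 * @cpu: logical CPU number
 *
 * Returns the device, or NULL if @cpu is out of range or not a possible CPU.
 */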
struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

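/* Attributes attached to the subsystem root device, /sys/devices/system/cpu. */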
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

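/* Return true if the CPU has a registered device that is marked hotpluggable. */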
bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

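/*
 * With CONFIG_GENERIC_CPU_DEVICES the driver core registers a device for
 * every possible CPU here; otherwise the architecture code is expected to
 * call register_cpu() itself.
 */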
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

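/*
 * Register the "cpu" subsystem and, where configured, the generic per-CPU
 * devices.  Called once during early boot.
 */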
void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}