drivers/hv/vmbus_drv.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

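/*
 * Device IDs added to a driver at runtime (via the sysfs "new_id"
 * attribute below) are tracked as a list of these nodes.
 */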
struct vmbus_dynid {
        struct list_head node;
        struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

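/*
 * CPU hotplug state returned by cpuhp_setup_state(); saved so the state
 * can be removed again on the error path in vmbus_bus_init().
 */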
static int hyperv_cpuhp_online;

static void *hv_panic_page;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

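/*
 * Returns true when the panic should be reported via the crash MSRs right
 * away, i.e. when kmsg-based reporting is disabled or the panic message
 * page could not be allocated.
 */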
static int hyperv_report_reg(void)
{
        return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        struct pt_regs *regs;

        vmbus_initiate_unload(true);

        /*
         * Hyper-V should be notified only once about a panic.  If we are
         * going to invoke hyperv_report_panic_msg() later with kmsg data,
         * don't do the notification here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
                regs = current_pt_regs();
                hyperv_report_panic(regs, val, false);
        }
        return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
                            void *args)
{
        struct die_args *die = args;
        struct pt_regs *regs = die->regs;

        /* Don't notify Hyper-V if the die event is other than oops */
        if (val != DIE_OOPS)
                return NOTIFY_DONE;

        /*
         * Hyper-V should be notified only once about a panic.  If we are
         * going to invoke hyperv_report_panic_msg() later with kmsg data,
         * don't do the notification here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
        return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
        .notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
        .notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}

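/*
 * Monitor IDs are tracked 32 per trigger group in the monitor page, so a
 * channel's group is monitorid / 32 and its slot within the group is
 * monitorid % 32.
 */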
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);

        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;

        return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
                                       struct device_attribute *dev_attr,
                                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
        int buf_size = PAGE_SIZE, n_written, tot_written;
        struct list_head *cur;

        if (!channel)
                return -ENODEV;

        mutex_lock(&vmbus_connection.channel_mutex);

        tot_written = snprintf(buf, buf_size, "%u:%u\n",
                channel->offermsg.child_relid, channel->target_cpu);

        list_for_each(cur, &channel->sc_list) {
                if (tot_written >= buf_size - 1)
                        break;

                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
                n_written = scnprintf(buf + tot_written,
                                     buf_size - tot_written,
                                     "%u:%u\n",
                                     cur_sc->offermsg.child_relid,
                                     cur_sc->target_cpu);
                tot_written += n_written;
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char *driver_override, *old, *cp;

        /* We need to keep extra room for a newline */
        if (count >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, count, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        cp = strchr(driver_override, '\n');
        if (cp)
                *cp = '\0';

        device_lock(dev);
        old = hv_dev->driver_override;
        if (strlen(driver_override)) {
                hv_dev->driver_override = driver_override;
        } else {
                kfree(driver_override);
                hv_dev->driver_override = NULL;
        }
        device_unlock(dev);

        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
        device_unlock(dev);

        return len;
}
static DEVICE_ATTR_RW(driver_override);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
        &dev_attr_numa_node.attr,
#endif
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
        &dev_attr_vendor.attr,
        &dev_attr_device.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
                                         struct attribute *attr, int idx)
{
        struct device *dev = kobj_to_dev(kobj);
        const struct hv_device *hv_dev = device_to_hv_device(dev);

        /* Hide the monitor attributes if the monitor mechanism is not used. */
        if (!hv_dev->channel->offermsg.monitor_allocated &&
            (attr == &dev_attr_monitor_id.attr ||
             attr == &dev_attr_server_monitor_pending.attr ||
             attr == &dev_attr_client_monitor_pending.attr ||
             attr == &dev_attr_server_monitor_latency.attr ||
             attr == &dev_attr_client_monitor_latency.attr ||
             attr == &dev_attr_server_monitor_conn_id.attr ||
             attr == &dev_attr_client_monitor_conn_id.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
        .attrs = vmbus_dev_attrs,
        .is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(struct bus_type *bus, char *buf)
{
        return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);

static struct attribute *vmbus_bus_attrs[] = {
        &bus_attr_hibernation.attr,
        NULL,
};
static const struct attribute_group vmbus_bus_group = {
        .attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev then matches the uevent
 * against its rules to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        const char *format = "MODALIAS=vmbus:%*phN";

        return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
        if (id == NULL)
                return NULL; /* empty device table */

        for (; !guid_is_null(&id->guid); id++)
                if (guid_equal(&id->guid, guid))
                        return id;

        return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
        const struct hv_vmbus_device_id *id = NULL;
        struct vmbus_dynid *dynid;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry(dynid, &drv->dynids.list, node) {
                if (guid_equal(&dynid->id.guid, guid)) {
                        id = &dynid->id;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return id;
}

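/* All-null device ID, returned when driver_override forces a match. */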
static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
                                                        struct hv_device *dev)
{
        const guid_t *guid = &dev->dev_type;
        const struct hv_vmbus_device_id *id;

        /* When driver_override is set, only bind to the matching driver */
        if (dev->driver_override && strcmp(dev->driver_override, drv->name))
                return NULL;

        /* Look at the dynamic ids first, before the static ones */
        id = hv_vmbus_dynid_match(drv, guid);
        if (!id)
                id = hv_vmbus_dev_match(drv->id_table, guid);

        /* driver_override will always match, send a dummy id */
        if (!id && dev->driver_override)
                id = &vmbus_device_null;

        return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
        struct vmbus_dynid *dynid;

        dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
        if (!dynid)
                return -ENOMEM;

        dynid->id.guid = *guid;

        spin_lock(&drv->dynids.lock);
        list_add_tail(&dynid->node, &drv->dynids.list);
        spin_unlock(&drv->dynids.lock);

        return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
        struct vmbus_dynid *dynid, *n;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                list_del(&dynid->node);
                kfree(dynid);
        }
        spin_unlock(&drv->dynids.lock);
}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
                            size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        if (hv_vmbus_dynid_match(drv, &guid))
                return -EEXIST;

        retval = vmbus_add_dynid(drv, &guid);
        if (retval)
                return retval;
        return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * store_remove_id - remove a device ID from this driver
 *
 * Removes a dynamic VMbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
                               size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct vmbus_dynid *dynid, *n;
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        retval = -ENODEV;
        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                struct hv_vmbus_device_id *id = &dynid->id;

                if (guid_equal(&id->guid, &guid)) {
                        list_del(&dynid->node);
                        kfree(dynid);
                        retval = count;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
        &driver_attr_new_id.attr,
        &driver_attr_remove_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        /* The hv_sock driver handles all hv_sock offers. */
        if (is_hvsock_channel(hv_dev->channel))
                return drv->hvsock;

        if (hv_vmbus_get_id(drv, hv_dev))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv, dev);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        if (child_device->driver) {
                drv = drv_to_hv_drv(child_device->driver);
                if (drv->remove)
                        drv->remove(dev);
        }
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->suspend)
                return -EOPNOTSUPP;

        return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->resume)
                return -EOPNOTSUPP;

        return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);
        struct vmbus_channel *channel = hv_dev->channel;

        hv_debug_rm_dev_dir(hv_dev);

        mutex_lock(&vmbus_connection.channel_mutex);
        hv_process_channel_removal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
        .suspend_noirq  = NULL,
        .resume_noirq   = NULL,
        .freeze_noirq   = vmbus_suspend,
        .thaw_noirq     = vmbus_resume,
        .poweroff_noirq = vmbus_suspend,
        .restore_noirq  = vmbus_resume,
};

/* The one and only one */
static struct bus_type  hv_bus = {
        .name =         "vmbus",
        .match =                vmbus_match,
        .shutdown =             vmbus_shutdown,
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .bus_groups =           vmbus_bus_groups,
        .pm =                   &vmbus_pm,
};

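/*
 * A work item plus a private copy of the host message it carries;
 * vmbus_on_msg_dpc() copies the message out of the shared SynIC page into
 * this context before scheduling the work.
 */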
struct onmessage_work_context {
        struct work_struct work;
        struct {
                struct hv_message_header header;
                u8 payload[];
        } msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        /* Do not process messages if we're in DISCONNECTED state */
        if (vmbus_connection.conn_state == DISCONNECTED)
                return;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage((struct vmbus_channel_message_header *)
                        &ctx->msg.payload);
        kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
        struct hv_per_cpu_context *hv_cpu = (void *)data;
        void *page_addr = hv_cpu->synic_message_page;
        struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
        enum vmbus_channel_message_type msgtype;
        const struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
        __u8 payload_size;
        u32 message_type;

        /*
         * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
         * it is being used in 'struct vmbus_channel_message_header' definition
         * which is supposed to match hypervisor ABI.
         */
        BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

        /*
         * Since the message is in memory shared with the host, an erroneous or
         * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
         * or individual message handlers are executing; to prevent this, copy
         * the message into private memory.
         */
        memcpy(&msg_copy, msg, sizeof(struct hv_message));

        message_type = msg_copy.header.message_type;
        if (message_type == HVMSG_NONE)
                /* no msg */
                return;

        hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
        msgtype = hdr->msgtype;

        trace_vmbus_on_msg_dpc(hdr);

        if (msgtype >= CHANNELMSG_COUNT) {
                WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
                goto msg_handled;
        }

        payload_size = msg_copy.header.payload_size;
        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
                WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
                goto msg_handled;
        }

        entry = &channel_message_table[msgtype];

        if (!entry->message_handler)
                goto msg_handled;

        if (payload_size < entry->min_payload_len) {
                WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
                goto msg_handled;
        }

        if (entry->handler_type == VMHT_BLOCKING) {
                ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
                if (ctx == NULL)
                        return;

                INIT_WORK(&ctx->work, vmbus_onmessage_work);
                memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);

                /*
                 * The host can generate a rescind message while we
                 * may still be handling the original offer. We deal with
                 * this condition by relying on the synchronization provided
                 * by offer_in_progress and by channel_mutex.  See also the
                 * inline comments in vmbus_onoffer_rescind().
                 */
                switch (msgtype) {
                case CHANNELMSG_RESCIND_CHANNELOFFER:
                        /*
                         * If we are handling the rescind message,
                         * schedule the work on the global work queue.
                         *
                         * The OFFER message and the RESCIND message should
                         * not be handled by the same serialized work queue,
                         * because the OFFER handler may call vmbus_open(),
                         * which tries to open the channel by sending an
                         * OPEN_CHANNEL message to the host and waits for
                         * the host's response; however, if the host has
                         * rescinded the channel before it receives the
                         * OPEN_CHANNEL message, the host just silently
                         * ignores the OPEN_CHANNEL message; as a result,
                         * the guest's OFFER handler hangs forever, if we
                         * handle the RESCIND message in the same serialized
                         * work queue: the RESCIND handler cannot start to
                         * run before the OFFER handler finishes.
                         */
                        schedule_work(&ctx->work);
                        break;

                case CHANNELMSG_OFFERCHANNEL:
                        /*
                         * The host sends the offer message of a given channel
                         * before sending the rescind message of the same
                         * channel.  These messages are sent to the guest's
                         * connect CPU; the guest then starts processing them
                         * in the tasklet handler on this CPU:
                         *
                         * VMBUS_CONNECT_CPU
                         *
                         * [vmbus_on_msg_dpc()]
                         * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
                         * queue_work()
                         * ...
                         * [vmbus_on_msg_dpc()]
                         * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
                         *
                         * We rely on the memory-ordering properties of the
                         * queue_work() and schedule_work() primitives, which
                         * guarantee that the atomic increment will be visible
                         * to the CPUs which will execute the offer & rescind
                         * works by the time these works will start execution.
                         */
                        atomic_inc(&vmbus_connection.offer_in_progress);
                        fallthrough;

                default:
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }
        } else
                entry->message_handler(hdr);

msg_handled:
        vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
        struct onmessage_work_context *ctx;
        struct vmbus_channel_rescind_offer *rescind;

        WARN_ON(!is_hvsock_channel(channel));

        /*
         * Allocation size is small and the allocation should really not fail,
         * otherwise the state of the hv_sock connections ends up in limbo.
         */
        ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
                      GFP_KERNEL | __GFP_NOFAIL);

        /*
         * So far, these are not really used by Linux. Just set them to the
         * reasonable values conforming to the definitions of the fields.
         */
        ctx->msg.header.message_type = 1;
        ctx->msg.header.payload_size = sizeof(*rescind);

        /* These values are actually used by Linux. */
        rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
        rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
        rescind->child_relid = channel->offermsg.child_relid;

        INIT_WORK(&ctx->work, vmbus_onmessage_work);

        queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
        unsigned long *recv_int_page;
        u32 maxbits, relid;

        if (vmbus_proto_version < VERSION_WIN8) {
                maxbits = MAX_NUM_CHANNELS_SUPPORTED;
                recv_int_page = vmbus_connection.recv_int_page;
        } else {
                /*
                 * When the host is win8 and beyond, the event page
                 * can be directly checked to get the id of the channel
                 * that has the interrupt pending.
                 */
                void *page_addr = hv_cpu->synic_event_page;
                union hv_synic_event_flags *event
                        = (union hv_synic_event_flags *)page_addr +
                                                 VMBUS_MESSAGE_SINT;

                maxbits = HV_EVENT_FLAGS_COUNT;
                recv_int_page = event->flags;
        }

        if (unlikely(!recv_int_page))
                return;

        for_each_set_bit(relid, recv_int_page, maxbits) {
                void (*callback_fn)(void *context);
                struct vmbus_channel *channel;

                if (!sync_test_and_clear_bit(relid, recv_int_page))
                        continue;

                /* Special case - vmbus channel protocol msg */
                if (relid == 0)
                        continue;

                /*
                 * Pairs with the kfree_rcu() in vmbus_chan_release().
                 * Guarantees that the channel data structure doesn't
                 * get freed while the channel pointer below is being
                 * dereferenced.
                 */
                rcu_read_lock();

                /* Find channel based on relid */
                channel = relid2channel(relid);
                if (channel == NULL)
                        goto sched_unlock_rcu;

                if (channel->rescind)
                        goto sched_unlock_rcu;

                /*
                 * Make sure that the ring buffer data structure doesn't get
                 * freed while we dereference the ring buffer pointer.  Test
                 * for the channel's onchannel_callback being NULL within a
                 * sched_lock critical section.  See also the inline comments
                 * in vmbus_reset_channel_cb().
                 */
                spin_lock(&channel->sched_lock);

                callback_fn = channel->onchannel_callback;
                if (unlikely(callback_fn == NULL))
                        goto sched_unlock;

                trace_vmbus_chan_sched(channel);

                ++channel->interrupts;

                switch (channel->callback_mode) {
                case HV_CALL_ISR:
                        (*callback_fn)(channel->channel_callback_context);
                        break;

                case HV_CALL_BATCHED:
                        hv_begin_read(&channel->inbound);
                        fallthrough;
                case HV_CALL_DIRECT:
                        tasklet_schedule(&channel->callback_event);
                }

sched_unlock:
                spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
                rcu_read_unlock();
        }
}

static void vmbus_isr(void)
{
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
        void *page_addr = hv_cpu->synic_event_page;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        if (unlikely(page_addr == NULL))
                return;

        event = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
                (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0, event->flags))
                        handled = true;
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                vmbus_chan_sched(hv_cpu);

        page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
                        hv_stimer0_isr();
                        vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
                } else
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }

        add_interrupt_randomness(vmbus_interrupt, 0);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
        vmbus_isr();
        return IRQ_HANDLED;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
{
        struct kmsg_dump_iter iter;
        size_t bytes_written;

        /* We are only interested in panics. */
        if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
                return;

        /*
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
        kmsg_dump_rewind(&iter);
        kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
        if (!bytes_written)
                return;
        /*
         * Set P3 to the physical address of the panic page and P4 to the
         * size of the panic data in that page. The rest of the registers
         * are no-ops when the NOTIFY_MSG flag is set.
         */
        hv_set_register(HV_REGISTER_CRASH_P0, 0);
        hv_set_register(HV_REGISTER_CRASH_P1, 0);
        hv_set_register(HV_REGISTER_CRASH_P2, 0);
        hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
        hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);

        /*
         * Let Hyper-V know there is crash data available along with
         * the panic message.
         */
        hv_set_register(HV_REGISTER_CRASH_CTL,
               (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
        .dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_register(void)
{
        int ret;

        hv_panic_page = hv_alloc_hyperv_zeroed_page();
        if (!hv_panic_page) {
                pr_err("Hyper-V: panic message page memory allocation failed\n");
                return;
        }

        ret = kmsg_dump_register(&hv_kmsg_dumper);
        if (ret) {
                pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
                hv_free_hyperv_page((unsigned long)hv_panic_page);
                hv_panic_page = NULL;
        }
}

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
        {
                .procname       = "hyperv_record_panic_msg",
                .data           = &sysctl_record_panic_msg,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE
        },
        {}
};

static struct ctl_table hv_root_table[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = hv_ctl_table
        },
        {}
};

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - retrieve the channel offers
 */
1493 static int vmbus_bus_init(void)
1494 {
1495         int ret;
1496
1497         ret = hv_init();
1498         if (ret != 0) {
1499                 pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
1500                 return ret;
1501         }
1502
1503         ret = bus_register(&hv_bus);
1504         if (ret)
1505                 return ret;
1506
1507         /*
1508          * VMbus interrupts are best modeled as per-cpu interrupts. If
1509          * on an architecture with support for per-cpu IRQs (e.g. ARM64),
1510          * allocate a per-cpu IRQ using standard Linux kernel functionality.
1511          * If not on such an architecture (e.g., x86/x64), then rely on
1512          * code in the arch-specific portion of the code tree to connect
1513          * the VMbus interrupt handler.
1514          */
1515
1516         if (vmbus_irq == -1) {
1517                 hv_setup_vmbus_handler(vmbus_isr);
1518         } else {
1519                 vmbus_evt = alloc_percpu(long);
1520                 ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
1521                                 "Hyper-V VMbus", vmbus_evt);
1522                 if (ret) {
1523                         pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
1524                                         vmbus_irq, ret);
1525                         free_percpu(vmbus_evt);
1526                         goto err_setup;
1527                 }
1528         }
1529
1530         ret = hv_synic_alloc();
1531         if (ret)
1532                 goto err_alloc;
1533
1534         /*
1535          * Initialize the per-cpu interrupt state and stimer state.
1536          * Then connect to the host.
1537          */
1538         ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
1539                                 hv_synic_init, hv_synic_cleanup);
1540         if (ret < 0)
1541                 goto err_cpuhp;
1542         hyperv_cpuhp_online = ret;
1543
1544         ret = vmbus_connect();
1545         if (ret)
1546                 goto err_connect;
1547
1548         /*
1549          * Only register if the crash MSRs are available
1550          */
1551         if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
1552                 u64 hyperv_crash_ctl;
1553                 /*
1554                  * Sysctl registration is not fatal, since by default
1555                  * reporting is enabled.
1556                  */
1557                 hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
1558                 if (!hv_ctl_table_hdr)
1559                         pr_err("Hyper-V: sysctl table register error\n");
1560
1561                 /*
1562                  * Register for panic kmsg callback only if the right
1563                  * capability is supported by the hypervisor.
1564                  */
1565                 hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
1566                 if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
1567                         hv_kmsg_dump_register();
1568
1569                 register_die_notifier(&hyperv_die_block);
1570         }
1571
1572         /*
1573          * Always register the panic notifier because we need to unload
1574          * the VMbus channel connection to prevent any VMbus
1575          * activity after the VM panics.
1576          */
1577         atomic_notifier_chain_register(&panic_notifier_list,
1578                                &hyperv_panic_block);
1579
1580         vmbus_request_offers();
1581
1582         return 0;
1583
1584 err_connect:
1585         cpuhp_remove_state(hyperv_cpuhp_online);
1586 err_cpuhp:
1587         hv_synic_free();
1588 err_alloc:
1589         if (vmbus_irq == -1) {
1590                 hv_remove_vmbus_handler();
1591         } else {
1592                 free_percpu_irq(vmbus_irq, vmbus_evt);
1593                 free_percpu(vmbus_evt);
1594         }
1595 err_setup:
1596         bus_unregister(&hv_bus);
1597         unregister_sysctl_table(hv_ctl_table_hdr);
1598         hv_ctl_table_hdr = NULL;
1599         return ret;
1600 }
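/*
 * Note that the unwind order above mirrors the setup order: a
 * vmbus_connect() failure removes the CPU hotplug state, then the
 * SynIC allocations, then the interrupt wiring, and finally the bus
 * registration.  Unregistering the sysctl table unconditionally is
 * safe because unregister_sysctl_table() tolerates a NULL header,
 * which is what hv_ctl_table_hdr still is on these error paths.
 */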
1601
1602 /**
1603  * __vmbus_driver_register() - Register a vmbus driver
1604  * @hv_driver: Pointer to the driver structure to register
1605  * @owner: owner module of the driver
1606  * @mod_name: module name string
1607  *
1608  * Registers the given driver with Linux through the 'driver_register()' call
1609  * and sets up the Hyper-V VMbus handling for this driver.
1610  * It returns the result of the 'driver_register()' call.
1611  *
1612  */
1613 int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
1614 {
1615         int ret;
1616
1617         pr_info("registering driver %s\n", hv_driver->name);
1618
1619         ret = vmbus_exists();
1620         if (ret < 0)
1621                 return ret;
1622
1623         hv_driver->driver.name = hv_driver->name;
1624         hv_driver->driver.owner = owner;
1625         hv_driver->driver.mod_name = mod_name;
1626         hv_driver->driver.bus = &hv_bus;
1627
1628         spin_lock_init(&hv_driver->dynids.lock);
1629         INIT_LIST_HEAD(&hv_driver->dynids.list);
1630
1631         ret = driver_register(&hv_driver->driver);
1632
1633         return ret;
1634 }
1635 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
1636
1637 /**
1638  * vmbus_driver_unregister() - Unregister a vmbus driver
1639  * @hv_driver: Pointer to the driver structure to
1640  *             unregister
1641  *
1642  * Unregisters the given driver that was previously registered with a call to
1643  * vmbus_driver_register()
1644  */
1645 void vmbus_driver_unregister(struct hv_driver *hv_driver)
1646 {
1647         pr_info("unregistering driver %s\n", hv_driver->name);
1648
1649         if (!vmbus_exists()) {
1650                 driver_unregister(&hv_driver->driver);
1651                 vmbus_free_dynids(hv_driver);
1652         }
1653 }
1654 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
1655
1656
1657 /*
1658  * Called when last reference to channel is gone.
1659  */
1660 static void vmbus_chan_release(struct kobject *kobj)
1661 {
1662         struct vmbus_channel *channel
1663                 = container_of(kobj, struct vmbus_channel, kobj);
1664
1665         kfree_rcu(channel, rcu);
1666 }
1667
1668 struct vmbus_chan_attribute {
1669         struct attribute attr;
1670         ssize_t (*show)(struct vmbus_channel *chan, char *buf);
1671         ssize_t (*store)(struct vmbus_channel *chan,
1672                          const char *buf, size_t count);
1673 };
1674 #define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
1675         struct vmbus_chan_attribute chan_attr_##_name \
1676                 = __ATTR(_name, _mode, _show, _store)
1677 #define VMBUS_CHAN_ATTR_RW(_name) \
1678         struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
1679 #define VMBUS_CHAN_ATTR_RO(_name) \
1680         struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
1681 #define VMBUS_CHAN_ATTR_WO(_name) \
1682         struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
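/*
 * For illustration, the __ATTR_RO()-based variant wires up a
 * same-named show routine, so a declaration such as
 *
 *   static VMBUS_CHAN_ATTR_RO(out_mask);
 *
 * defines chan_attr_out_mask with .attr.name = "out_mask", mode 0444,
 * and .show = out_mask_show, per the standard __ATTR_RO() convention.
 */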
1683
1684 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1685                                     struct attribute *attr, char *buf)
1686 {
1687         const struct vmbus_chan_attribute *attribute
1688                 = container_of(attr, struct vmbus_chan_attribute, attr);
1689         struct vmbus_channel *chan
1690                 = container_of(kobj, struct vmbus_channel, kobj);
1691
1692         if (!attribute->show)
1693                 return -EIO;
1694
1695         return attribute->show(chan, buf);
1696 }
1697
1698 static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
1699                                      struct attribute *attr, const char *buf,
1700                                      size_t count)
1701 {
1702         const struct vmbus_chan_attribute *attribute
1703                 = container_of(attr, struct vmbus_chan_attribute, attr);
1704         struct vmbus_channel *chan
1705                 = container_of(kobj, struct vmbus_channel, kobj);
1706
1707         if (!attribute->store)
1708                 return -EIO;
1709
1710         return attribute->store(chan, buf, count);
1711 }
1712
1713 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1714         .show = vmbus_chan_attr_show,
1715         .store = vmbus_chan_attr_store,
1716 };
1717
1718 static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
1719 {
1720         struct hv_ring_buffer_info *rbi = &channel->outbound;
1721         ssize_t ret;
1722
1723         mutex_lock(&rbi->ring_buffer_mutex);
1724         if (!rbi->ring_buffer) {
1725                 mutex_unlock(&rbi->ring_buffer_mutex);
1726                 return -EINVAL;
1727         }
1728
1729         ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1730         mutex_unlock(&rbi->ring_buffer_mutex);
1731         return ret;
1732 }
1733 static VMBUS_CHAN_ATTR_RO(out_mask);
1734
1735 static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
1736 {
1737         struct hv_ring_buffer_info *rbi = &channel->inbound;
1738         ssize_t ret;
1739
1740         mutex_lock(&rbi->ring_buffer_mutex);
1741         if (!rbi->ring_buffer) {
1742                 mutex_unlock(&rbi->ring_buffer_mutex);
1743                 return -EINVAL;
1744         }
1745
1746         ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1747         mutex_unlock(&rbi->ring_buffer_mutex);
1748         return ret;
1749 }
1750 static VMBUS_CHAN_ATTR_RO(in_mask);
1751
1752 static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
1753 {
1754         struct hv_ring_buffer_info *rbi = &channel->inbound;
1755         ssize_t ret;
1756
1757         mutex_lock(&rbi->ring_buffer_mutex);
1758         if (!rbi->ring_buffer) {
1759                 mutex_unlock(&rbi->ring_buffer_mutex);
1760                 return -EINVAL;
1761         }
1762
1763         ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
1764         mutex_unlock(&rbi->ring_buffer_mutex);
1765         return ret;
1766 }
1767 static VMBUS_CHAN_ATTR_RO(read_avail);
1768
1769 static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
1770 {
1771         struct hv_ring_buffer_info *rbi = &channel->outbound;
1772         ssize_t ret;
1773
1774         mutex_lock(&rbi->ring_buffer_mutex);
1775         if (!rbi->ring_buffer) {
1776                 mutex_unlock(&rbi->ring_buffer_mutex);
1777                 return -EINVAL;
1778         }
1779
1780         ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
1781         mutex_unlock(&rbi->ring_buffer_mutex);
1782         return ret;
1783 }
1784 static VMBUS_CHAN_ATTR_RO(write_avail);
1785
1786 static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
1787 {
1788         return sprintf(buf, "%u\n", channel->target_cpu);
1789 }
1790 static ssize_t target_cpu_store(struct vmbus_channel *channel,
1791                                 const char *buf, size_t count)
1792 {
1793         u32 target_cpu, origin_cpu;
1794         ssize_t ret = count;
1795
1796         if (vmbus_proto_version < VERSION_WIN10_V4_1)
1797                 return -EIO;
1798
1799         if (sscanf(buf, "%u", &target_cpu) != 1)
1800                 return -EIO;
1801
1802         /* Validate target_cpu for the cpumask_test_cpu() operation below. */
1803         if (target_cpu >= nr_cpumask_bits)
1804                 return -EINVAL;
1805
1806         /* No CPUs should come up or down during this. */
1807         cpus_read_lock();
1808
1809         if (!cpu_online(target_cpu)) {
1810                 cpus_read_unlock();
1811                 return -EINVAL;
1812         }
1813
1814         /*
1815          * Synchronizes target_cpu_store() and channel closure:
1816          *
1817          * { Initially: state = CHANNEL_OPENED }
1818          *
1819          * CPU1                         CPU2
1820          *
1821          * [target_cpu_store()]         [vmbus_disconnect_ring()]
1822          *
1823          * LOCK channel_mutex           LOCK channel_mutex
1824          * LOAD r1 = state              LOAD r2 = state
1825          * IF (r1 == CHANNEL_OPENED)    IF (r2 == CHANNEL_OPENED)
1826          *   SEND MODIFYCHANNEL           STORE state = CHANNEL_OPEN
1827          *   [...]                        SEND CLOSECHANNEL
1828          * UNLOCK channel_mutex         UNLOCK channel_mutex
1829          *
1830          * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
1831          *              CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
1832          *
1833          * Note.  The host processes the channel messages "sequentially", in
1834          * the order in which they are received on a per-partition basis.
1835          */
1836         mutex_lock(&vmbus_connection.channel_mutex);
1837
1838         /*
1839          * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
1840          * avoid sending the message and fail here for such channels.
1841          */
1842         if (channel->state != CHANNEL_OPENED_STATE) {
1843                 ret = -EIO;
1844                 goto cpu_store_unlock;
1845         }
1846
1847         origin_cpu = channel->target_cpu;
1848         if (target_cpu == origin_cpu)
1849                 goto cpu_store_unlock;
1850
1851         if (vmbus_send_modifychannel(channel,
1852                                      hv_cpu_number_to_vp_number(target_cpu))) {
1853                 ret = -EIO;
1854                 goto cpu_store_unlock;
1855         }
1856
1857         /*
1858          * For versions before VERSION_WIN10_V5_3, the following warning holds:
1859          *
1860          * Warning.  At this point, there is *no* guarantee that the host will
1861          * have successfully processed the vmbus_send_modifychannel() request.
1862          * See the header comment of vmbus_send_modifychannel() for more info.
1863          *
1864          * Lags in the processing of the above vmbus_send_modifychannel() can
1865          * result in missed interrupts if the "old" target CPU is taken offline
1866          * before Hyper-V starts sending interrupts to the "new" target CPU.
1867          * But apart from this offlining scenario, the code tolerates such
1868          * lags.  It will function correctly even if a channel interrupt comes
1869          * in on a CPU that is different from the channel target_cpu value.
1870          */
1871
1872         channel->target_cpu = target_cpu;
1873
1874         /* See init_vp_index(). */
1875         if (hv_is_perf_channel(channel))
1876                 hv_update_alloced_cpus(origin_cpu, target_cpu);
1877
1878         /* Currently set only for storvsc channels. */
1879         if (channel->change_target_cpu_callback) {
1880                 (*channel->change_target_cpu_callback)(channel,
1881                                 origin_cpu, target_cpu);
1882         }
1883
1884 cpu_store_unlock:
1885         mutex_unlock(&vmbus_connection.channel_mutex);
1886         cpus_read_unlock();
1887         return ret;
1888 }
1889 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
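/*
 * Usage sketch (the instance GUID and relid below are placeholders;
 * see vmbus_device_register() for how this hierarchy is created):
 *
 *   cat /sys/bus/vmbus/devices/<instance-guid>/channels/<relid>/cpu
 *   echo 3 > /sys/bus/vmbus/devices/<instance-guid>/channels/<relid>/cpu
 *
 * The write fails with -EIO when the negotiated protocol is older than
 * VERSION_WIN10_V4_1 and for channels not in CHANNEL_OPENED_STATE.
 */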
1890
1891 static ssize_t channel_pending_show(struct vmbus_channel *channel,
1892                                     char *buf)
1893 {
1894         return sprintf(buf, "%d\n",
1895                        channel_pending(channel,
1896                                        vmbus_connection.monitor_pages[1]));
1897 }
1898 static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1899
1900 static ssize_t channel_latency_show(struct vmbus_channel *channel,
1901                                     char *buf)
1902 {
1903         return sprintf(buf, "%d\n",
1904                        channel_latency(channel,
1905                                        vmbus_connection.monitor_pages[1]));
1906 }
1907 static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1908
1909 static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
1910 {
1911         return sprintf(buf, "%llu\n", channel->interrupts);
1912 }
1913 static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
1914
1915 static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
1916 {
1917         return sprintf(buf, "%llu\n", channel->sig_events);
1918 }
1919 static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
1920
1921 static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1922                                          char *buf)
1923 {
1924         return sprintf(buf, "%llu\n",
1925                        (unsigned long long)channel->intr_in_full);
1926 }
1927 static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1928
1929 static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1930                                            char *buf)
1931 {
1932         return sprintf(buf, "%llu\n",
1933                        (unsigned long long)channel->intr_out_empty);
1934 }
1935 static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1936
1937 static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1938                                            char *buf)
1939 {
1940         return sprintf(buf, "%llu\n",
1941                        (unsigned long long)channel->out_full_first);
1942 }
1943 static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1944
1945 static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1946                                            char *buf)
1947 {
1948         return sprintf(buf, "%llu\n",
1949                        (unsigned long long)channel->out_full_total);
1950 }
1951 static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1952
1953 static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1954                                           char *buf)
1955 {
1956         return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1957 }
1958 static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1959
1960 static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1961                                   char *buf)
1962 {
1963         return sprintf(buf, "%u\n",
1964                        channel->offermsg.offer.sub_channel_index);
1965 }
1966 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1967
1968 static struct attribute *vmbus_chan_attrs[] = {
1969         &chan_attr_out_mask.attr,
1970         &chan_attr_in_mask.attr,
1971         &chan_attr_read_avail.attr,
1972         &chan_attr_write_avail.attr,
1973         &chan_attr_cpu.attr,
1974         &chan_attr_pending.attr,
1975         &chan_attr_latency.attr,
1976         &chan_attr_interrupts.attr,
1977         &chan_attr_events.attr,
1978         &chan_attr_intr_in_full.attr,
1979         &chan_attr_intr_out_empty.attr,
1980         &chan_attr_out_full_first.attr,
1981         &chan_attr_out_full_total.attr,
1982         &chan_attr_monitor_id.attr,
1983         &chan_attr_subchannel_id.attr,
1984         NULL
1985 };
1986
1987 /*
1988  * Channel-level attribute_group callback function. Returns the permission for
1989  * each attribute, and returns 0 if an attribute is not visible.
1990  */
1991 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1992                                           struct attribute *attr, int idx)
1993 {
1994         const struct vmbus_channel *channel =
1995                 container_of(kobj, struct vmbus_channel, kobj);
1996
1997         /* Hide the monitor attributes if the monitor mechanism is not used. */
1998         if (!channel->offermsg.monitor_allocated &&
1999             (attr == &chan_attr_pending.attr ||
2000              attr == &chan_attr_latency.attr ||
2001              attr == &chan_attr_monitor_id.attr))
2002                 return 0;
2003
2004         return attr->mode;
2005 }
2006
2007 static struct attribute_group vmbus_chan_group = {
2008         .attrs = vmbus_chan_attrs,
2009         .is_visible = vmbus_chan_attr_is_visible
2010 };
2011
2012 static struct kobj_type vmbus_chan_ktype = {
2013         .sysfs_ops = &vmbus_chan_sysfs_ops,
2014         .release = vmbus_chan_release,
2015 };
2016
2017 /*
2018  * vmbus_add_channel_kobj - setup a sub-directory under device/channels
2019  */
2020 int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
2021 {
2022         const struct device *device = &dev->device;
2023         struct kobject *kobj = &channel->kobj;
2024         u32 relid = channel->offermsg.child_relid;
2025         int ret;
2026
2027         kobj->kset = dev->channels_kset;
2028         ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2029                                    "%u", relid);
2030         if (ret)
2031                 return ret;
2032
2033         ret = sysfs_create_group(kobj, &vmbus_chan_group);
2034
2035         if (ret) {
2036                 /*
2037                  * The calling functions' error handling paths will clean up the
2038                  * empty channel directory.
2039                  */
2040                 dev_err(device, "Unable to set up channel sysfs files\n");
2041                 return ret;
2042         }
2043
2044         kobject_uevent(kobj, KOBJ_ADD);
2045
2046         return 0;
2047 }
2048
2049 /*
2050  * vmbus_remove_channel_attr_group - remove the channel's attribute group
2051  */
2052 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
2053 {
2054         sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
2055 }
2056
2057 /*
2058  * vmbus_device_create - Creates and registers a new child device
2059  * on the vmbus.
2060  */
2061 struct hv_device *vmbus_device_create(const guid_t *type,
2062                                       const guid_t *instance,
2063                                       struct vmbus_channel *channel)
2064 {
2065         struct hv_device *child_device_obj;
2066
2067         child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
2068         if (!child_device_obj) {
2069                 pr_err("Unable to allocate device object for child device\n");
2070                 return NULL;
2071         }
2072
2073         child_device_obj->channel = channel;
2074         guid_copy(&child_device_obj->dev_type, type);
2075         guid_copy(&child_device_obj->dev_instance, instance);
2076         child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
2077
2078         return child_device_obj;
2079 }
2080
2081 /*
2082  * vmbus_device_register - Register the child device
2083  */
2084 int vmbus_device_register(struct hv_device *child_device_obj)
2085 {
2086         struct kobject *kobj = &child_device_obj->device.kobj;
2087         int ret;
2088
2089         dev_set_name(&child_device_obj->device, "%pUl",
2090                      &child_device_obj->channel->offermsg.offer.if_instance);
2091
2092         child_device_obj->device.bus = &hv_bus;
2093         child_device_obj->device.parent = &hv_acpi_dev->dev;
2094         child_device_obj->device.release = vmbus_device_release;
2095
2096         /*
2097          * Register with the LDM. This will kick off the driver/device
2098          * binding...which will eventually call vmbus_match() and vmbus_probe()
2099          */
2100         ret = device_register(&child_device_obj->device);
2101         if (ret) {
2102                 pr_err("Unable to register child device\n");
2103                 return ret;
2104         }
2105
2106         child_device_obj->channels_kset = kset_create_and_add("channels",
2107                                                               NULL, kobj);
2108         if (!child_device_obj->channels_kset) {
2109                 ret = -ENOMEM;
2110                 goto err_dev_unregister;
2111         }
2112
2113         ret = vmbus_add_channel_kobj(child_device_obj,
2114                                      child_device_obj->channel);
2115         if (ret) {
2116                 pr_err("Unable to register primary channel\n");
2117                 goto err_kset_unregister;
2118         }
2119         hv_debug_add_dev_dir(child_device_obj);
2120
2121         return 0;
2122
2123 err_kset_unregister:
2124         kset_unregister(child_device_obj->channels_kset);
2125
2126 err_dev_unregister:
2127         device_unregister(&child_device_obj->device);
2128         return ret;
2129 }
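/*
 * Resulting sysfs layout, as a sketch (the instance GUID and relids
 * vary per VM):
 *
 *   /sys/bus/vmbus/devices/<instance-guid>/
 *       channels/
 *           <relid>/        <- one per channel, see vmbus_add_channel_kobj()
 *               cpu, in_mask, out_mask, read_avail, write_avail, ...
 */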
2130
2131 /*
2132  * vmbus_device_unregister - Remove the specified child device
2133  * from the vmbus.
2134  */
2135 void vmbus_device_unregister(struct hv_device *device_obj)
2136 {
2137         pr_debug("child device %s unregistered\n",
2138                 dev_name(&device_obj->device));
2139
2140         kset_unregister(device_obj->channels_kset);
2141
2142         /*
2143          * Kick off the process of unregistering the device.
2144          * This will call vmbus_remove() and eventually vmbus_device_release()
2145          */
2146         device_unregister(&device_obj->device);
2147 }
2148
2149
2150 /*
2151  * VMbus is an ACPI-enumerated device. Get the information we
2152  * need from the DSDT.
2153  */
2154 #define VTPM_BASE_ADDRESS 0xfed40000
2155 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
2156 {
2157         resource_size_t start = 0;
2158         resource_size_t end = 0;
2159         struct resource *new_res;
2160         struct resource **old_res = &hyperv_mmio;
2161         struct resource **prev_res = NULL;
2162         struct resource r;
2163
2164         switch (res->type) {
2165
2166         /*
2167          * "Address" descriptors are for bus windows. Ignore
2168          * "memory" descriptors, which are for registers on
2169          * devices.
2170          */
2171         case ACPI_RESOURCE_TYPE_ADDRESS32:
2172                 start = res->data.address32.address.minimum;
2173                 end = res->data.address32.address.maximum;
2174                 break;
2175
2176         case ACPI_RESOURCE_TYPE_ADDRESS64:
2177                 start = res->data.address64.address.minimum;
2178                 end = res->data.address64.address.maximum;
2179                 break;
2180
2181         /*
2182          * The IRQ information is needed only on ARM64, which Hyper-V
2183          * sets up in the extended format. IRQ information is present
2184          * on x86/x64 in the non-extended format but it is not used by
2185          * Linux. So don't bother checking for the non-extended format.
2186          */
2187         case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2188                 if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2189                         pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2190                         return AE_ERROR;
2191                 }
2192                 /* ARM64 INTID for VMbus */
2193                 vmbus_interrupt = res->data.extended_irq.interrupts[0];
2194                 /* Linux IRQ number */
2195                 vmbus_irq = r.start;
2196                 return AE_OK;
2197
2198         default:
2199                 /* Unused resource type */
2200                 return AE_OK;
2201
2202         }
2203         /*
2204          * Ignore ranges that are below 1MB, as they're not
2205          * necessary or useful here.
2206          */
2207         if (end < 0x100000)
2208                 return AE_OK;
2209
2210         new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2211         if (!new_res)
2212                 return AE_NO_MEMORY;
2213
2214         /* If this range overlaps the virtual TPM, truncate it. */
2215         if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2216                 end = VTPM_BASE_ADDRESS;
2217
2218         new_res->name = "hyperv mmio";
2219         new_res->flags = IORESOURCE_MEM;
2220         new_res->start = start;
2221         new_res->end = end;
2222
2223         /*
2224          * If two ranges are adjacent, merge them.
2225          */
2226         do {
2227                 if (!*old_res) {
2228                         *old_res = new_res;
2229                         break;
2230                 }
2231
2232                 if (((*old_res)->end + 1) == new_res->start) {
2233                         (*old_res)->end = new_res->end;
2234                         kfree(new_res);
2235                         break;
2236                 }
2237
2238                 if ((*old_res)->start == new_res->end + 1) {
2239                         (*old_res)->start = new_res->start;
2240                         kfree(new_res);
2241                         break;
2242                 }
2243
2244                 if ((*old_res)->start > new_res->end) {
2245                         new_res->sibling = *old_res;
2246                         if (prev_res)
2247                                 (*prev_res)->sibling = new_res;
2248                         *old_res = new_res;
2249                         break;
2250                 }
2251
2252                 prev_res = old_res;
2253                 old_res = &(*old_res)->sibling;
2254
2255         } while (1);
2256
2257         return AE_OK;
2258 }
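/*
 * Worked example for the merge logic above (addresses are made up):
 * with an existing range [0xF8000000, 0xF9FFFFFF], a new descriptor
 * starting at 0xFA000000 satisfies (*old_res)->end + 1 == new_res->start,
 * so the two collapse into one "hyperv mmio" resource ending at the new
 * range's end.  Non-adjacent ranges are instead linked in so that the
 * sibling list stays sorted by start address.
 */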
2259
2260 static int vmbus_acpi_remove(struct acpi_device *device)
2261 {
2262         struct resource *cur_res;
2263         struct resource *next_res;
2264
2265         if (hyperv_mmio) {
2266                 if (fb_mmio) {
2267                         __release_region(hyperv_mmio, fb_mmio->start,
2268                                          resource_size(fb_mmio));
2269                         fb_mmio = NULL;
2270                 }
2271
2272                 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2273                         next_res = cur_res->sibling;
2274                         kfree(cur_res);
2275                 }
2276         }
2277
2278         return 0;
2279 }
2280
2281 static void vmbus_reserve_fb(void)
2282 {
2283         int size;
2284         /*
2285          * Make a claim for the frame buffer in the resource tree under the
2286          * first node, which will be the one below 4GB.  The length seems to
2287          * be underreported, particularly in a Generation 1 VM.  So start out
2288          * reserving a larger area and make it smaller until it succeeds.
2289          */
2290
2291         if (screen_info.lfb_base) {
2292                 if (efi_enabled(EFI_BOOT))
2293                         size = max_t(__u32, screen_info.lfb_size, 0x800000);
2294                 else
2295                         size = max_t(__u32, screen_info.lfb_size, 0x4000000);
2296
2297                 for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
2298                         fb_mmio = __request_region(hyperv_mmio,
2299                                                    screen_info.lfb_base, size,
2300                                                    fb_mmio_name, 0);
2301                 }
2302         }
2303 }
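/*
 * For example, on a non-EFI (Generation 1) VM the first attempt is
 * max(lfb_size, 0x4000000), i.e. at least 64 MB; every failed
 * __request_region() halves the size, so the attempts run 64 MB,
 * 32 MB, ... down to the 1 MB (0x100000) floor before giving up.
 */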
2304
2305 /**
2306  * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2307  * @new:                If successful, supplies a pointer to the
2308  *                      allocated MMIO space.
2309  * @device_obj:         Identifies the caller
2310  * @min:                Minimum guest physical address of the
2311  *                      allocation
2312  * @max:                Maximum guest physical address
2313  * @size:               Size of the range to be allocated
2314  * @align:              Alignment of the range to be allocated
2315  * @fb_overlap_ok:      Whether this allocation can be allowed
2316  *                      to overlap the video frame buffer.
2317  *
2318  * This function walks the resources granted to VMBus by the
2319  * _CRS object in the ACPI namespace underneath the parent
2320  * "bridge" whether that's a root PCI bus in the Generation 1
2321  * case or a Module Device in the Generation 2 case.  It then
2322  * attempts to allocate from the global MMIO pool in a way that
2323  * matches the constraints supplied in these parameters and by
2324  * that _CRS.
2325  *
2326  * Return: 0 on success, -errno on failure
2327  */
2328 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2329                         resource_size_t min, resource_size_t max,
2330                         resource_size_t size, resource_size_t align,
2331                         bool fb_overlap_ok)
2332 {
2333         struct resource *iter, *shadow;
2334         resource_size_t range_min, range_max, start;
2335         const char *dev_n = dev_name(&device_obj->device);
2336         int retval;
2337
2338         retval = -ENXIO;
2339         mutex_lock(&hyperv_mmio_lock);
2340
2341         /*
2342          * If overlaps with frame buffers are allowed, then first attempt to
2343          * make the allocation from within the reserved region.  Because it
2344          * is already reserved, no shadow allocation is necessary.
2345          */
2346         if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2347             !(max < fb_mmio->start)) {
2348
2349                 range_min = fb_mmio->start;
2350                 range_max = fb_mmio->end;
2351                 start = (range_min + align - 1) & ~(align - 1);
2352                 for (; start + size - 1 <= range_max; start += align) {
2353                         *new = request_mem_region_exclusive(start, size, dev_n);
2354                         if (*new) {
2355                                 retval = 0;
2356                                 goto exit;
2357                         }
2358                 }
2359         }
2360
2361         for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2362                 if ((iter->start >= max) || (iter->end <= min))
2363                         continue;
2364
2365                 range_min = iter->start;
2366                 range_max = iter->end;
2367                 start = (range_min + align - 1) & ~(align - 1);
2368                 for (; start + size - 1 <= range_max; start += align) {
2369                         shadow = __request_region(iter, start, size, NULL,
2370                                                   IORESOURCE_BUSY);
2371                         if (!shadow)
2372                                 continue;
2373
2374                         *new = request_mem_region_exclusive(start, size, dev_n);
2375                         if (*new) {
2376                                 shadow->name = (char *)*new;
2377                                 retval = 0;
2378                                 goto exit;
2379                         }
2380
2381                         __release_region(iter, start, size);
2382                 }
2383         }
2384
2385 exit:
2386         mutex_unlock(&hyperv_mmio_lock);
2387         return retval;
2388 }
2389 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
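/*
 * A minimal caller sketch; the device pointer and the range/size/align
 * values are hypothetical (in-tree users include the Hyper-V PCI and
 * framebuffer drivers):
 *
 *   struct resource *res;
 *   int ret = vmbus_allocate_mmio(&res, hdev, 0, -1,
 *                                 0x100000, 0x1000, false);
 *   if (!ret) {
 *           // map and use [res->start, res->end], then:
 *           vmbus_free_mmio(res->start, resource_size(res));
 *   }
 */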
2390
2391 /**
2392  * vmbus_free_mmio() - Free a memory-mapped I/O range.
2393  * @start:              Base address of region to release.
2394  * @size:               Size of the range to be released
2395  *
2396  * This function releases anything requested by
2397  * vmbus_allocate_mmio().
2398  */
2399 void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2400 {
2401         struct resource *iter;
2402
2403         mutex_lock(&hyperv_mmio_lock);
2404         for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2405                 if ((iter->start >= start + size) || (iter->end <= start))
2406                         continue;
2407
2408                 __release_region(iter, start, size);
2409         }
2410         release_mem_region(start, size);
2411         mutex_unlock(&hyperv_mmio_lock);
2412
2413 }
2414 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
2415
2416 static int vmbus_acpi_add(struct acpi_device *device)
2417 {
2418         acpi_status result;
2419         int ret_val = -ENODEV;
2420         struct acpi_device *ancestor;
2421
2422         hv_acpi_dev = device;
2423
2424         result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2425                                         vmbus_walk_resources, NULL);
2426
2427         if (ACPI_FAILURE(result))
2428                 goto acpi_walk_err;
2429         /*
2430          * Some ancestor of the vmbus ACPI device (Gen1 or Gen2
2431          * firmware) is the VMOD that has the MMIO ranges. Get that.
2432          */
2433         for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
2434                 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2435                                              vmbus_walk_resources, NULL);
2436
2437                 if (ACPI_FAILURE(result))
2438                         continue;
2439                 if (hyperv_mmio) {
2440                         vmbus_reserve_fb();
2441                         break;
2442                 }
2443         }
2444         ret_val = 0;
2445
2446 acpi_walk_err:
2447         complete(&probe_event);
2448         if (ret_val)
2449                 vmbus_acpi_remove(device);
2450         return ret_val;
2451 }
2452
2453 #ifdef CONFIG_PM_SLEEP
2454 static int vmbus_bus_suspend(struct device *dev)
2455 {
2456         struct vmbus_channel *channel, *sc;
2457
2458         while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
2459                 /*
2460                  * We wait here until the completion of any channel
2461                  * offers that are currently in progress.
2462                  */
2463                 usleep_range(1000, 2000);
2464         }
2465
2466         mutex_lock(&vmbus_connection.channel_mutex);
2467         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2468                 if (!is_hvsock_channel(channel))
2469                         continue;
2470
2471                 vmbus_force_channel_rescinded(channel);
2472         }
2473         mutex_unlock(&vmbus_connection.channel_mutex);
2474
2475         /*
2476          * Wait until all the sub-channels and hv_sock channels have been
2477          * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2478          * they would conflict with the new sub-channels that will be created
2479          * in the resume path. hv_sock channels should also be destroyed, but
2480          * a hv_sock channel of an established hv_sock connection can not be
2481          * really destroyed since it may still be referenced by the userspace
2482          * application, so we just force the hv_sock channel to be rescinded
2483          * by vmbus_force_channel_rescinded(), and the userspace application
2484          * will thoroughly destroy the channel after hibernation.
2485          *
2486          * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2487          * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
2488          */
2489         if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2490                 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2491
2492         if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
2493                 pr_err("Cannot suspend due to a previously failed resume\n");
2494                 return -EBUSY;
2495         }
2496
2497         mutex_lock(&vmbus_connection.channel_mutex);
2498
2499         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2500                 /*
2501                  * Remove the channel from the array of channels and invalidate
2502                  * the channel's relid.  Upon resume, vmbus_onoffer() will fix
2503                  * up the relid (and other fields, if necessary) and add the
2504                  * channel back to the array.
2505                  */
2506                 vmbus_channel_unmap_relid(channel);
2507                 channel->offermsg.child_relid = INVALID_RELID;
2508
2509                 if (is_hvsock_channel(channel)) {
2510                         if (!channel->rescind) {
2511                                 pr_err("hv_sock channel not rescinded!\n");
2512                                 WARN_ON_ONCE(1);
2513                         }
2514                         continue;
2515                 }
2516
2517                 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2518                         pr_err("Sub-channel not deleted!\n");
2519                         WARN_ON_ONCE(1);
2520                 }
2521
2522                 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2523         }
2524
2525         mutex_unlock(&vmbus_connection.channel_mutex);
2526
2527         vmbus_initiate_unload(false);
2528
2529         /* Reset the event for the next resume. */
2530         reinit_completion(&vmbus_connection.ready_for_resume_event);
2531
2532         return 0;
2533 }
2534
2535 static int vmbus_bus_resume(struct device *dev)
2536 {
2537         struct vmbus_channel_msginfo *msginfo;
2538         size_t msgsize;
2539         int ret;
2540
2541         /*
2542          * We only use the 'vmbus_proto_version', which was in use before
2543          * hibernation, to re-negotiate with the host.
2544          */
2545         if (!vmbus_proto_version) {
2546                 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2547                 return -EINVAL;
2548         }
2549
2550         msgsize = sizeof(*msginfo) +
2551                   sizeof(struct vmbus_channel_initiate_contact);
2552
2553         msginfo = kzalloc(msgsize, GFP_KERNEL);
2554
2555         if (msginfo == NULL)
2556                 return -ENOMEM;
2557
2558         ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2559
2560         kfree(msginfo);
2561
2562         if (ret != 0)
2563                 return ret;
2564
2565         WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2566
2567         vmbus_request_offers();
2568
2569         if (wait_for_completion_timeout(
2570                 &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
2571                 pr_err("Some vmbus device may be missing after suspend\n");
2572
2573         /* Reset the event for the next suspend. */
2574         reinit_completion(&vmbus_connection.ready_for_suspend_event);
2575
2576         return 0;
2577 }
2578 #else
2579 #define vmbus_bus_suspend NULL
2580 #define vmbus_bus_resume NULL
2581 #endif /* CONFIG_PM_SLEEP */
2582
2583 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2584         {"VMBUS", 0},
2585         {"VMBus", 0},
2586         {"", 0},
2587 };
2588 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2589
2590 /*
2591  * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
2592  * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
2593  * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
2594  * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2595  * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
2596  * resume callback must also run via the "noirq" ops.
2597  *
2598  * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
2599  * earlier in this file before vmbus_pm.
2600  */
2601
2602 static const struct dev_pm_ops vmbus_bus_pm = {
2603         .suspend_noirq  = NULL,
2604         .resume_noirq   = NULL,
2605         .freeze_noirq   = vmbus_bus_suspend,
2606         .thaw_noirq     = vmbus_bus_resume,
2607         .poweroff_noirq = vmbus_bus_suspend,
2608         .restore_noirq  = vmbus_bus_resume
2609 };
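/*
 * Mapping sketch: freeze/thaw bracket creation of the hibernation
 * image, poweroff runs before the machine is powered down once the
 * image is written, and restore runs when resuming from the image.
 * All four hibernation transitions therefore go through
 * vmbus_bus_suspend()/vmbus_bus_resume(), while the Suspend-to-Idle
 * hooks stay NULL as explained above.
 */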
2610
2611 static struct acpi_driver vmbus_acpi_driver = {
2612         .name = "vmbus",
2613         .ids = vmbus_acpi_device_ids,
2614         .ops = {
2615                 .add = vmbus_acpi_add,
2616                 .remove = vmbus_acpi_remove,
2617         },
2618         .drv.pm = &vmbus_bus_pm,
2619 };
2620
2621 static void hv_kexec_handler(void)
2622 {
2623         hv_stimer_global_cleanup();
2624         vmbus_initiate_unload(false);
2625         /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2626         mb();
2627         cpuhp_remove_state(hyperv_cpuhp_online);
2628 }
2629
2630 static void hv_crash_handler(struct pt_regs *regs)
2631 {
2632         int cpu;
2633
2634         vmbus_initiate_unload(true);
2635         /*
2636          * In crash handler we can't schedule synic cleanup for all CPUs,
2637          * doing the cleanup for current CPU only. This should be sufficient
2638          * for kdump.
2639          */
2640         cpu = smp_processor_id();
2641         hv_stimer_cleanup(cpu);
2642         hv_synic_disable_regs(cpu);
2643 }
2644
2645 static int hv_synic_suspend(void)
2646 {
2647         /*
2648          * When we reach here, all the non-boot CPUs have been offlined.
2649          * If we're in a legacy configuration where stimer Direct Mode is
2650          * not enabled, the stimers on the non-boot CPUs have been unbound
2651          * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
2652          * hv_stimer_cleanup() -> clockevents_unbind_device().
2653          *
2654          * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
2655          * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
2656          * 1) it's unnecessary as interrupts remain disabled between
2657          * syscore_suspend() and syscore_resume(): see create_image() and
2658          * resume_target_kernel()
2659          * 2) the stimer on CPU0 is automatically disabled later by
2660          * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2661          * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
2662          * 3) a warning would be triggered if we call
2663          * clockevents_unbind_device(), which may sleep, in an
2664          * interrupts-disabled context.
2665          */
2666
2667         hv_synic_disable_regs(0);
2668
2669         return 0;
2670 }
2671
2672 static void hv_synic_resume(void)
2673 {
2674         hv_synic_enable_regs(0);
2675
2676         /*
2677          * Note: we don't need to call hv_stimer_init(0), because the timer
2678          * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2679          * automatically re-enabled in timekeeping_resume().
2680          */
2681 }
2682
2683 /* The callbacks run only on CPU0, with irqs_disabled. */
2684 static struct syscore_ops hv_synic_syscore_ops = {
2685         .suspend = hv_synic_suspend,
2686         .resume = hv_synic_resume,
2687 };
2688
2689 static int __init hv_acpi_init(void)
2690 {
2691         int ret, t;
2692
2693         if (!hv_is_hyperv_initialized())
2694                 return -ENODEV;
2695
2696         if (hv_root_partition)
2697                 return 0;
2698
2699         init_completion(&probe_event);
2700
2701         /*
2702          * Get ACPI resources first.
2703          */
2704         ret = acpi_bus_register_driver(&vmbus_acpi_driver);
2705
2706         if (ret)
2707                 return ret;
2708
2709         t = wait_for_completion_timeout(&probe_event, 5*HZ);
2710         if (t == 0) {
2711                 ret = -ETIMEDOUT;
2712                 goto cleanup;
2713         }
2714
2715         /*
2716          * If we're on an architecture with a hardcoded hypervisor
2717          * vector (i.e. x86/x64), override the VMbus interrupt found
2718          * in the ACPI tables. Ensure vmbus_irq is not set since the
2719          * normal Linux IRQ mechanism is not used in this case.
2720          */
2721 #ifdef HYPERVISOR_CALLBACK_VECTOR
2722         vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2723         vmbus_irq = -1;
2724 #endif
2725
2726         hv_debug_init();
2727
2728         ret = vmbus_bus_init();
2729         if (ret)
2730                 goto cleanup;
2731
2732         hv_setup_kexec_handler(hv_kexec_handler);
2733         hv_setup_crash_handler(hv_crash_handler);
2734
2735         register_syscore_ops(&hv_synic_syscore_ops);
2736
2737         return 0;
2738
2739 cleanup:
2740         acpi_bus_unregister_driver(&vmbus_acpi_driver);
2741         hv_acpi_dev = NULL;
2742         return ret;
2743 }
2744
2745 static void __exit vmbus_exit(void)
2746 {
2747         int cpu;
2748
2749         unregister_syscore_ops(&hv_synic_syscore_ops);
2750
2751         hv_remove_kexec_handler();
2752         hv_remove_crash_handler();
2753         vmbus_connection.conn_state = DISCONNECTED;
2754         hv_stimer_global_cleanup();
2755         vmbus_disconnect();
2756         if (vmbus_irq == -1) {
2757                 hv_remove_vmbus_handler();
2758         } else {
2759                 free_percpu_irq(vmbus_irq, vmbus_evt);
2760                 free_percpu(vmbus_evt);
2761         }
2762         for_each_online_cpu(cpu) {
2763                 struct hv_per_cpu_context *hv_cpu
2764                         = per_cpu_ptr(hv_context.cpu_context, cpu);
2765
2766                 tasklet_kill(&hv_cpu->msg_dpc);
2767         }
2768         hv_debug_rm_all_dir();
2769
2770         vmbus_free_channels();
2771         kfree(vmbus_connection.channels);
2772
2773         if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
2774                 kmsg_dump_unregister(&hv_kmsg_dumper);
2775                 unregister_die_notifier(&hyperv_die_block);
2776                 atomic_notifier_chain_unregister(&panic_notifier_list,
2777                                                  &hyperv_panic_block);
2778         }
2779
2780         hv_free_hyperv_page((unsigned long)hv_panic_page);
2781         unregister_sysctl_table(hv_ctl_table_hdr);
2782         hv_ctl_table_hdr = NULL;
2783         bus_unregister(&hv_bus);
2784
2785         cpuhp_remove_state(hyperv_cpuhp_online);
2786         hv_synic_free();
2787         acpi_bus_unregister_driver(&vmbus_acpi_driver);
2788 }
2789
2790
2791 MODULE_LICENSE("GPL");
2792 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2793
2794 subsys_initcall(hv_acpi_init);
2795 module_exit(vmbus_exit);