Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
author		Linus Torvalds <torvalds@linux-foundation.org>	Wed, 14 Oct 2020 17:32:10 +0000 (10:32 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 14 Oct 2020 17:32:10 +0000 (10:32 -0700)
Pull Hyper-V updates from Wei Liu:

 - a series from Boqun Feng to support page sizes larger than 4K (a condensed sketch of the new hvpfn helpers follows the shortlog)

 - a few miscellaneous clean-ups

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv: clocksource: Add notrace attribute to read_hv_sched_clock_*() functions
  x86/hyperv: Remove aliases with X64 in their name
  PCI: hv: Document missing hv_pci_protocol_negotiation() parameter
  scsi: storvsc: Support PAGE_SIZE larger than 4K
  Driver: hv: util: Use VMBUS_RING_SIZE() for ringbuffer sizes
  HID: hyperv: Use VMBUS_RING_SIZE() for ringbuffer sizes
  Input: hyperv-keyboard: Use VMBUS_RING_SIZE() for ringbuffer sizes
  hv_netvsc: Use HV_HYP_PAGE_SIZE for Hyper-V communication
  hv: hyperv.h: Introduce some hvpfn helper functions
  Drivers: hv: vmbus: Move virt_to_hvpfn() to hyperv header
  Drivers: hv: Use HV_HYP_PAGE in hv_synic_enable_regs()
  Drivers: hv: vmbus: Introduce types of GPADL
  Drivers: hv: vmbus: Move __vmbus_open()
  Drivers: hv: vmbus: Always use HV_HYP_PAGE_SIZE for gpadl
  drivers: hv: remove cast from hyperv_die_event

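The common thread in Boqun Feng's series: Hyper-V always talks to the guest in fixed 4K pages (HV_HYP_PAGE_SIZE), while the guest's own PAGE_SIZE may be 16K or 64K (e.g. on ARM64), so ring buffers, GPADLs and page-buffer arrays must be described in Hyper-V page frames ("hvpfn"s) rather than guest page frames. A condensed sketch of the helpers the series adds to include/linux/hyperv.h (names per the shortlog above; bodies abbreviated from the series, so treat them as illustrative):

	#define HV_HYP_PAGE_SHIFT	12
	#define HV_HYP_PAGE_SIZE	BIT(HV_HYP_PAGE_SHIFT)
	#define HV_HYP_PAGE_MASK	(~(HV_HYP_PAGE_SIZE - 1))

	/* Hyper-V frames per guest page: 1 on 4K guests, 16 on 64K guests */
	#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)

	#define HVPFN_UP(x)		(((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)
	#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
	#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)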
drivers/hv/hv_util.c
drivers/hv/vmbus_drv.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/pci/controller/pci-hyperv.c

diff --combined drivers/hv/hv_util.c
@@@ -282,52 -282,26 +282,52 @@@ static struct 
        spinlock_t                      lock;
  } host_ts;
  
 -static struct timespec64 hv_get_adj_host_time(void)
 +static inline u64 reftime_to_ns(u64 reftime)
  {
 -      struct timespec64 ts;
 -      u64 newtime, reftime;
 +      return (reftime - WLTIMEDELTA) * 100;
 +}
 +
 +/*
 + * Hard coded threshold for host timesync delay: 600 seconds
 + */
 +static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
 +
 +static int hv_get_adj_host_time(struct timespec64 *ts)
 +{
 +      u64 newtime, reftime, timediff_adj;
        unsigned long flags;
 +      int ret = 0;
  
        spin_lock_irqsave(&host_ts.lock, flags);
        reftime = hv_read_reference_counter();
 -      newtime = host_ts.host_time + (reftime - host_ts.ref_time);
 -      ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
 +
 +      /*
 +       * We need to let the caller know that last update from host
 +       * is older than the max allowable threshold. clock_gettime()
 +       * and PTP ioctl do not have a documented error that we could
 +       * return for this specific case. Use ESTALE to report this.
 +       */
 +      timediff_adj = reftime - host_ts.ref_time;
 +      if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
 +              pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
 +                           (timediff_adj * 100));
 +              ret = -ESTALE;
 +      }
 +
 +      newtime = host_ts.host_time + timediff_adj;
 +      *ts = ns_to_timespec64(reftime_to_ns(newtime));
        spin_unlock_irqrestore(&host_ts.lock, flags);
  
 -      return ts;
 +      return ret;
  }
  
  static void hv_set_host_time(struct work_struct *work)
  {
 -      struct timespec64 ts = hv_get_adj_host_time();
  
 -      do_settimeofday64(&ts);
 +      struct timespec64 ts;
 +
 +      if (!hv_get_adj_host_time(&ts))
 +              do_settimeofday64(&ts);
  }
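For the arithmetic above: hv_read_reference_counter() returns 100 ns units since the Windows epoch (1601-01-01), so timediff_adj * 100 is plain nanoseconds and compares directly against the 600 s threshold, while WLTIMEDELTA (defined earlier in hv_util.c) shifts the epoch to 1970. A self-contained restatement of the conversion:

	/* 1601-01-01 to 1970-01-01, in 100 ns units */
	#define WLTIMEDELTA	116444736000000000LL

	static u64 example_reftime_to_ns(u64 reftime)
	{
		return (reftime - WLTIMEDELTA) * 100;	/* 100 ns units -> ns */
	}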
  
  /*
@@@ -387,23 -361,10 +387,23 @@@ static void timesync_onchannelcallback(
        struct ictimesync_ref_data *refdata;
        u8 *time_txf_buf = util_timesynch.recv_buffer;
  
 -      vmbus_recvpacket(channel, time_txf_buf,
 -                       HV_HYP_PAGE_SIZE, &recvlen, &requestid);
 +      /*
 +       * Drain the ring buffer and use the last packet to update
 +       * host_ts
 +       */
 +      while (1) {
 +              int ret = vmbus_recvpacket(channel, time_txf_buf,
 +                                         HV_HYP_PAGE_SIZE, &recvlen,
 +                                         &requestid);
 +              if (ret) {
 +                      pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
 +                                   ret);
 +                      break;
 +              }
 +
 +              if (!recvlen)
 +                      break;
  
 -      if (recvlen > 0) {
                icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
                                sizeof(struct vmbuspipe_hdr)];
  
@@@ -500,6 -461,9 +500,9 @@@ static void heartbeat_onchannelcallback
        }
  }
  
+ #define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+ #define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
  static int util_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
  {
  
        hv_set_drvdata(dev, srv);
  
-       ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-                        4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+       ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+                        HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
                         dev->channel);
        if (ret)
                goto error;
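VMBUS_RING_SIZE keeps roughly three Hyper-V pages of payload while rounding header-plus-payload up to the guest PAGE_SIZE, as the vmbus ring-buffer mapping requires. A sketch of the macro, following its definition in include/linux/hyperv.h (it relies on the ring-buffer header struct being padded to exactly one 4K Hyper-V page):

	#define VMBUS_RING_SIZE(payload_sz) \
		PAGE_ALIGN(sizeof(struct hv_ring_buffer) + (payload_sz))

On a 4K-page guest this evaluates to 16K, identical to the old 4 * HV_HYP_PAGE_SIZE; on a 64K-page guest it rounds up to a single 64K page.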
@@@ -590,8 -554,8 +593,8 @@@ static int util_resume(struct hv_devic
                        return ret;
        }
  
-       ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
-                        4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+       ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+                        HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
                         dev->channel);
        return ret;
  }
@@@ -661,7 -625,9 +664,7 @@@ static int hv_ptp_adjtime(struct ptp_cl
  
  static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
  {
 -      *ts = hv_get_adj_host_time();
 -
 -      return 0;
 +      return hv_get_adj_host_time(ts);
  }
  
  static struct ptp_clock_info ptp_hyperv_info = {
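Since hv_ptp_gettime() now propagates the error from hv_get_adj_host_time(), the new -ESTALE surfaces to userspace through the Hyper-V PTP clock: clock_gettime() on the PHC fails once the host's last TimeSync sample is over 600 seconds old. A hypothetical userspace probe (the /dev/ptp0 path is an assumption; FD_TO_CLOCKID is the standard dynamic-clock encoding):

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDONLY);	/* assumed Hyper-V PHC node */
		struct timespec ts;

		if (fd < 0)
			return 1;
		if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
			perror("clock_gettime");	/* ESTALE: stale host sample */
		else
			printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		close(fd);
		return 0;
	}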
diff --combined drivers/hv/vmbus_drv.c
@@@ -83,7 -83,7 +83,7 @@@ static int hyperv_panic_event(struct no
  static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
                            void *args)
  {
-       struct die_args *die = (struct die_args *)args;
+       struct die_args *die = args;
        struct pt_regs *regs = die->regs;
  
        /* Don't notify Hyper-V if the die event is other than oops */
@@@ -2382,10 -2382,7 +2382,10 @@@ static int vmbus_bus_suspend(struct dev
        if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
                wait_for_completion(&vmbus_connection.ready_for_suspend_event);
  
 -      WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
 +      if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
 +              pr_err("Can not suspend due to a previous failed resuming\n");
 +              return -EBUSY;
 +      }
  
        mutex_lock(&vmbus_connection.channel_mutex);
  
@@@ -2459,9 -2456,7 +2459,9 @@@ static int vmbus_bus_resume(struct devi
  
        vmbus_request_offers();
  
 -      wait_for_completion(&vmbus_connection.ready_for_resume_event);
 +      if (wait_for_completion_timeout(
 +              &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
 +              pr_err("Some vmbus device is missing after suspending?\n");
  
        /* Reset the event for the next suspend. */
        reinit_completion(&vmbus_connection.ready_for_suspend_event);
diff --combined drivers/net/hyperv/netvsc.c
@@@ -388,15 -388,6 +388,15 @@@ static int netvsc_init_buf(struct hv_de
        net_device->recv_section_size = resp->sections[0].sub_alloc_size;
        net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
  
 +      /* Ensure buffer will not overflow */
 +      if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
 +          (u64)net_device->recv_section_cnt > (u64)buf_size) {
 +              netdev_err(ndev, "invalid recv_section_size %u\n",
 +                         net_device->recv_section_size);
 +              ret = -EINVAL;
 +              goto cleanup;
 +      }
 +
        /* Setup receive completion ring.
         * Add 1 to the recv_section_cnt because at least one entry in a
         * ring buffer has to be empty.
        /* Parse the response */
        net_device->send_section_size = init_packet->msg.
                                v1_msg.send_send_buf_complete.section_size;
 +      if (net_device->send_section_size < NETVSC_MTU_MIN) {
 +              netdev_err(ndev, "invalid send_section_size %u\n",
 +                         net_device->send_section_size);
 +              ret = -EINVAL;
 +              goto cleanup;
 +      }
  
        /* Section count is simply the size divided by the section size. */
        net_device->send_section_cnt = buf_size / net_device->send_section_size;
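The u64 casts in the recv-section check above are the point: recv_section_size and recv_section_cnt are host-controlled 32-bit values, so their product can wrap in 32 bits and slip past a narrower comparison. A minimal standalone illustration of the widening pattern (hypothetical values):

	#include <stdbool.h>
	#include <stdint.h>

	/* true when size * cnt fits in buf_size without 32-bit wraparound */
	static bool sections_fit(uint32_t size, uint32_t cnt, uint32_t buf_size)
	{
		return (uint64_t)size * (uint64_t)cnt <= (uint64_t)buf_size;
	}

	/* size = 0x10000, cnt = 0x10000: the 32-bit product wraps to 0 and
	 * would pass; the 64-bit product (1ULL << 32) correctly fails.
	 */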
@@@ -746,49 -731,12 +746,49 @@@ static void netvsc_send_completion(stru
                                   int budget)
  {
        const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
 +      u32 msglen = hv_pkt_datalen(desc);
 +
 +      /* Ensure packet is big enough to read header fields */
 +      if (msglen < sizeof(struct nvsp_message_header)) {
 +              netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
 +              return;
 +      }
  
        switch (nvsp_packet->hdr.msg_type) {
        case NVSP_MSG_TYPE_INIT_COMPLETE:
 +              if (msglen < sizeof(struct nvsp_message_header) +
 +                              sizeof(struct nvsp_message_init_complete)) {
 +                      netdev_err(ndev, "nvsp_msg length too small: %u\n",
 +                                 msglen);
 +                      return;
 +              }
 +              fallthrough;
 +
        case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
 +              if (msglen < sizeof(struct nvsp_message_header) +
 +                              sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
 +                      netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
 +                                 msglen);
 +                      return;
 +              }
 +              fallthrough;
 +
        case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
 +              if (msglen < sizeof(struct nvsp_message_header) +
 +                              sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
 +                      netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
 +                                 msglen);
 +                      return;
 +              }
 +              fallthrough;
 +
        case NVSP_MSG5_TYPE_SUBCHANNEL:
 +              if (msglen < sizeof(struct nvsp_message_header) +
 +                              sizeof(struct nvsp_5_subchannel_complete)) {
 +                      netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
 +                                 msglen);
 +                      return;
 +              }
                /* Copy the response back */
                memcpy(&net_device->channel_init_pkt, nvsp_packet,
                       sizeof(struct nvsp_message));
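Each completion type above enforces header + body as a minimum length before its fields are touched, and the fallthrough chain means a packet must also clear the checks of every case below its own before reaching the common copy-out. The recurring test could be factored as a helper along these lines (hypothetical, not part of the patch):

	static bool nvsp_pkt_has_body(u32 msglen, size_t body_size)
	{
		return msglen >= sizeof(struct nvsp_message_header) + body_size;
	}

	/* usage:
	 *	if (!nvsp_pkt_has_body(msglen, sizeof(struct nvsp_message_init_complete)))
	 *		return;
	 */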
@@@ -846,7 -794,7 +846,7 @@@ static void netvsc_copy_to_send_buf(str
        }
  
        for (i = 0; i < page_count; i++) {
-               char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+               char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
                u32 offset = pb[i].offset;
                u32 len = pb[i].len;
  
@@@ -1169,28 -1117,19 +1169,28 @@@ static void enq_receive_complete(struc
  static int netvsc_receive(struct net_device *ndev,
                          struct netvsc_device *net_device,
                          struct netvsc_channel *nvchan,
 -                        const struct vmpacket_descriptor *desc,
 -                        const struct nvsp_message *nvsp)
 +                        const struct vmpacket_descriptor *desc)
  {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = nvchan->channel;
        const struct vmtransfer_page_packet_header *vmxferpage_packet
                = container_of(desc, const struct vmtransfer_page_packet_header, d);
 +      const struct nvsp_message *nvsp = hv_pkt_data(desc);
 +      u32 msglen = hv_pkt_datalen(desc);
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
  
 +      /* Ensure packet is big enough to read header fields */
 +      if (msglen < sizeof(struct nvsp_message_header)) {
 +              netif_err(net_device_ctx, rx_err, ndev,
 +                        "invalid nvsp header, length too small: %u\n",
 +                        msglen);
 +              return 0;
 +      }
 +
        /* Make sure this is a valid nvsp packet */
        if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
                netif_err(net_device_ctx, rx_err, ndev,
                return 0;
        }
  
 +      /* Validate xfer page pkt header */
 +      if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
 +              netif_err(net_device_ctx, rx_err, ndev,
 +                        "Invalid xfer page pkt, offset too small: %u\n",
 +                        desc->offset8 << 3);
 +              return 0;
 +      }
 +
        if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Invalid xfer page set id - expecting %x got %x\n",
  
        count = vmxferpage_packet->range_cnt;
  
 +      /* Check count for a valid value */
 +      if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
 +              netif_err(net_device_ctx, rx_err, ndev,
 +                        "Range count is not valid: %d\n",
 +                        count);
 +              return 0;
 +      }
 +
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
                u32 offset = vmxferpage_packet->ranges[i].byte_offset;
                void *data;
                int ret;
  
 -              if (unlikely(offset + buflen > net_device->recv_buf_size)) {
 +              if (unlikely(offset > net_device->recv_buf_size ||
 +                           buflen > net_device->recv_buf_size - offset)) {
                        nvchan->rsc.cnt = 0;
                        status = NVSP_STAT_FAIL;
                        netif_err(net_device_ctx, rx_err, ndev,
@@@ -1272,13 -1194,6 +1272,13 @@@ static void netvsc_send_table(struct ne
        u32 count, offset, *tab;
        int i;
  
 +      /* Ensure packet is big enough to read send_table fields */
 +      if (msglen < sizeof(struct nvsp_message_header) +
 +                   sizeof(struct nvsp_5_send_indirect_table)) {
 +              netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
 +              return;
 +      }
 +
        count = nvmsg->msg.v5_msg.send_table.count;
        offset = nvmsg->msg.v5_msg.send_table.offset;
  
  }
  
  static void netvsc_send_vf(struct net_device *ndev,
 -                         const struct nvsp_message *nvmsg)
 +                         const struct nvsp_message *nvmsg,
 +                         u32 msglen)
  {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
  
 +      /* Ensure packet is big enough to read its fields */
 +      if (msglen < sizeof(struct nvsp_message_header) +
 +                   sizeof(struct nvsp_4_send_vf_association)) {
 +              netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
 +              return;
 +      }
 +
        net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
        net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
        netdev_info(ndev, "VF slot %u %s\n",
  
  static void netvsc_receive_inband(struct net_device *ndev,
                                  struct netvsc_device *nvscdev,
 -                                const struct nvsp_message *nvmsg,
 -                                u32 msglen)
 +                                const struct vmpacket_descriptor *desc)
  {
 +      const struct nvsp_message *nvmsg = hv_pkt_data(desc);
 +      u32 msglen = hv_pkt_datalen(desc);
 +
 +      /* Ensure packet is big enough to read header fields */
 +      if (msglen < sizeof(struct nvsp_message_header)) {
 +              netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
 +              return;
 +      }
 +
        switch (nvmsg->hdr.msg_type) {
        case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
                netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
                break;
  
        case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
 -              netvsc_send_vf(ndev, nvmsg);
 +              netvsc_send_vf(ndev, nvmsg, msglen);
                break;
        }
  }
@@@ -1362,20 -1261,23 +1362,20 @@@ static int netvsc_process_raw_pkt(struc
  {
        struct vmbus_channel *channel = nvchan->channel;
        const struct nvsp_message *nvmsg = hv_pkt_data(desc);
 -      u32 msglen = hv_pkt_datalen(desc);
  
        trace_nvsp_recv(ndev, channel, nvmsg);
  
        switch (desc->type) {
        case VM_PKT_COMP:
 -              netvsc_send_completion(ndev, net_device, channel,
 -                                     desc, budget);
 +              netvsc_send_completion(ndev, net_device, channel, desc, budget);
                break;
  
        case VM_PKT_DATA_USING_XFER_PAGES:
 -              return netvsc_receive(ndev, net_device, nvchan,
 -                                    desc, nvmsg);
 +              return netvsc_receive(ndev, net_device, nvchan, desc);
                break;
  
        case VM_PKT_DATA_INBAND:
 -              netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
 +              netvsc_receive_inband(ndev, net_device, desc);
                break;
  
        default:
diff --combined drivers/net/hyperv/netvsc_drv.c
@@@ -367,38 -367,35 +367,35 @@@ static u16 netvsc_select_queue(struct n
        }
        rcu_read_unlock();
  
 -      while (unlikely(txq >= ndev->real_num_tx_queues))
 +      while (txq >= ndev->real_num_tx_queues)
                txq -= ndev->real_num_tx_queues;
  
        return txq;
  }
  
- static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+ static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
                       struct hv_page_buffer *pb)
  {
        int j = 0;
  
-       /* Deal with compound pages by ignoring unused part
-        * of the page.
-        */
-       page += (offset >> PAGE_SHIFT);
-       offset &= ~PAGE_MASK;
+       hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+       offset = offset & ~HV_HYP_PAGE_MASK;
  
        while (len > 0) {
                unsigned long bytes;
  
-               bytes = PAGE_SIZE - offset;
+               bytes = HV_HYP_PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
-               pb[j].pfn = page_to_pfn(page);
+               pb[j].pfn = hvpfn;
                pb[j].offset = offset;
                pb[j].len = bytes;
  
                offset += bytes;
                len -= bytes;
  
-               if (offset == PAGE_SIZE && len) {
-                       page++;
+               if (offset == HV_HYP_PAGE_SIZE && len) {
+                       hvpfn++;
                        offset = 0;
                        j++;
                }
@@@ -421,23 -418,26 +418,26 @@@ static u32 init_page_array(void *hdr, u
         * 2. skb linear data
         * 3. skb fragment data
         */
-       slots_used += fill_pg_buf(virt_to_page(hdr),
-                                 offset_in_page(hdr),
-                                 len, &pb[slots_used]);
+       slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+                                 offset_in_hvpage(hdr),
+                                 len,
+                                 &pb[slots_used]);
  
        packet->rmsg_size = len;
        packet->rmsg_pgcnt = slots_used;
  
-       slots_used += fill_pg_buf(virt_to_page(data),
-                               offset_in_page(data),
-                               skb_headlen(skb), &pb[slots_used]);
+       slots_used += fill_pg_buf(virt_to_hvpfn(data),
+                                 offset_in_hvpage(data),
+                                 skb_headlen(skb),
+                                 &pb[slots_used]);
  
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
  
-               slots_used += fill_pg_buf(skb_frag_page(frag),
-                                       skb_frag_off(frag),
-                                       skb_frag_size(frag), &pb[slots_used]);
+               slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+                                         skb_frag_off(frag),
+                                         skb_frag_size(frag),
+                                         &pb[slots_used]);
        }
        return slots_used;
  }
@@@ -453,8 -453,8 +453,8 @@@ static int count_skb_frag_slots(struct 
                unsigned long offset = skb_frag_off(frag);
  
                /* Skip unused frames from start of page */
-               offset &= ~PAGE_MASK;
-               pages += PFN_UP(offset + size);
+               offset &= ~HV_HYP_PAGE_MASK;
+               pages += HVPFN_UP(offset + size);
        }
        return pages;
  }
  static int netvsc_get_slots(struct sk_buff *skb)
  {
        char *data = skb->data;
-       unsigned int offset = offset_in_page(data);
+       unsigned int offset = offset_in_hvpage(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;
  
-       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+       slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
  }
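Slots are now counted in 4K Hyper-V frames rather than guest pages, so data that fits comfortably inside one 64K guest page may still need several slots. A worked example with hypothetical numbers:

	static int example_slots(void)
	{
		unsigned int offset = 0xF00;	/* offset_in_hvpage(data): 3840 */
		unsigned int len = 6000;	/* skb_headlen(skb) */

		/* DIV_ROUND_UP(3840 + 6000, 4096) = 3 Hyper-V frames */
		return DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
	}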
@@@ -502,7 -502,7 +502,7 @@@ static int netvsc_vf_xmit(struct net_de
        int rc;
  
        skb->dev = vf_netdev;
 -      skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
 +      skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
  
        rc = dev_queue_xmit(skb);
        if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
@@@ -748,13 -748,6 +748,13 @@@ void netvsc_linkstatus_callback(struct 
        struct netvsc_reconfig *event;
        unsigned long flags;
  
 +      /* Ensure the packet is big enough to access its fields */
 +      if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
 +              netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
 +                         resp->msg_len);
 +              return;
 +      }
 +
        /* Update the physical link speed when changing to another vSwitch */
        if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                u32 speed;
@@@ -2373,16 -2366,7 +2373,16 @@@ static int netvsc_register_vf(struct ne
        return NOTIFY_OK;
  }
  
 -/* VF up/down change detected, schedule to change data path */
 +/* Change the data path when VF UP/DOWN/CHANGE are detected.
 + *
 + * Typically a UP or DOWN event is followed by a CHANGE event, so
 + * net_device_ctx->data_path_is_vf is used to cache the current data path
 + * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
 + * message.
 + *
 + * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
 + * interface, there is only the CHANGE event and no UP or DOWN event.
 + */
  static int netvsc_vf_changed(struct net_device *vf_netdev)
  {
        struct net_device_context *net_device_ctx;
        if (!netvsc_dev)
                return NOTIFY_DONE;
  
 +      if (net_device_ctx->data_path_is_vf == vf_is_up)
 +              return NOTIFY_OK;
 +      net_device_ctx->data_path_is_vf = vf_is_up;
 +
        netvsc_switch_datapath(ndev, vf_is_up);
        netdev_info(ndev, "Data path switched %s VF: %s\n",
                    vf_is_up ? "to" : "from", vf_netdev->name);
@@@ -2607,8 -2587,8 +2607,8 @@@ static int netvsc_remove(struct hv_devi
  static int netvsc_suspend(struct hv_device *dev)
  {
        struct net_device_context *ndev_ctx;
 -      struct net_device *vf_netdev, *net;
        struct netvsc_device *nvdev;
 +      struct net_device *net;
        int ret;
  
        net = hv_get_drvdata(dev);
                goto out;
        }
  
 -      vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
 -      if (vf_netdev)
 -              netvsc_unregister_vf(vf_netdev);
 -
        /* Save the current config info */
        ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
  
@@@ -2644,12 -2628,6 +2644,12 @@@ static int netvsc_resume(struct hv_devi
        rtnl_lock();
  
        net_device_ctx = netdev_priv(net);
 +
 +      /* Reset the data path to the netvsc NIC before re-opening the vmbus
 +       * channel. Later netvsc_netdev_event() will switch the data path to
 +       * the VF upon the UP or CHANGE event.
 +       */
 +      net_device_ctx->data_path_is_vf = false;
        device_info = net_device_ctx->saved_netvsc_dev_info;
  
        ret = netvsc_attach(net, device_info);
@@@ -2717,7 -2695,6 +2717,7 @@@ static int netvsc_netdev_event(struct n
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
        case NETDEV_DOWN:
 +      case NETDEV_CHANGE:
                return netvsc_vf_changed(event_dev);
        default:
                return NOTIFY_DONE;
diff --combined drivers/net/hyperv/rndis_filter.c
@@@ -25,7 -25,7 +25,7 @@@
  
  static void rndis_set_multicast(struct work_struct *w);
  
- #define RNDIS_EXT_LEN PAGE_SIZE
+ #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
  struct rndis_request {
        struct list_head list_ent;
        struct completion  wait_event;
@@@ -215,18 -215,17 +215,17 @@@ static int rndis_filter_send_request(st
        packet->page_buf_cnt = 1;
  
        pb[0].pfn = virt_to_phys(&req->request_msg) >>
-                                       PAGE_SHIFT;
+                                       HV_HYP_PAGE_SHIFT;
        pb[0].len = req->request_msg.msg_len;
-       pb[0].offset =
-               (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+       pb[0].offset = offset_in_hvpage(&req->request_msg);
  
        /* Add one page_buf when request_msg crossing page boundary */
-       if (pb[0].offset + pb[0].len > PAGE_SIZE) {
+       if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
                packet->page_buf_cnt++;
-               pb[0].len = PAGE_SIZE -
+               pb[0].len = HV_HYP_PAGE_SIZE -
                        pb[0].offset;
                pb[1].pfn = virt_to_phys((void *)&req->request_msg
-                       + pb[0].len) >> PAGE_SHIFT;
+                       + pb[0].len) >> HV_HYP_PAGE_SHIFT;
                pb[1].offset = 0;
                pb[1].len = req->request_msg.msg_len -
                        pb[0].len;
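With PAGE_SIZE larger than 4K, a request_msg that is contiguous within a single guest page can still straddle a 4K Hyper-V frame boundary, which is what the second page buffer covers. The split arithmetic, with hypothetical numbers:

	static void example_split(struct hv_page_buffer pb[2])
	{
		u32 offset = 0xF80;	/* offset_in_hvpage(&req->request_msg) */
		u32 msg_len = 0x100;

		pb[0].offset = offset;
		pb[0].len    = HV_HYP_PAGE_SIZE - offset;	/* 0x80 bytes in frame N */
		pb[1].offset = 0;
		pb[1].len    = msg_len - pb[0].len;		/* 0x80 bytes in frame N + 1 */
	}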
@@@ -275,16 -274,6 +274,16 @@@ static void rndis_filter_receive_respon
                return;
        }
  
 +      /* Ensure the packet is big enough to read req_id. Req_id is the 1st
 +       * field in any request/response message, so the payload should have at
 +       * least sizeof(u32) bytes
 +       */
 +      if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
 +              netdev_err(ndev, "rndis msg_len too small: %u\n",
 +                         resp->msg_len);
 +              return;
 +      }
 +
        spin_lock_irqsave(&dev->request_lock, flags);
        list_for_each_entry(request, &dev->req_list, list_ent) {
                /*
   * Get the Per-Packet-Info with the specified type
   * return NULL if not found.
   */
 -static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
 -                                u32 type, u8 internal)
 +static inline void *rndis_get_ppi(struct net_device *ndev,
 +                                struct rndis_packet *rpkt,
 +                                u32 rpkt_len, u32 type, u8 internal)
  {
        struct rndis_per_packet_info *ppi;
        int len;
        if (rpkt->per_pkt_info_offset == 0)
                return NULL;
  
 +      /* Validate info_offset and info_len */
 +      if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
 +          rpkt->per_pkt_info_offset > rpkt_len) {
 +              netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
 +                         rpkt->per_pkt_info_offset);
 +              return NULL;
 +      }
 +
 +      if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
 +              netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
 +                         rpkt->per_pkt_info_len);
 +              return NULL;
 +      }
 +
        ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
                rpkt->per_pkt_info_offset);
        len = rpkt->per_pkt_info_len;
  
        while (len > 0) {
 +              /* Validate ppi_offset and ppi_size */
 +              if (ppi->size > len) {
 +                      netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
 +                      continue;
 +              }
 +
 +              if (ppi->ppi_offset >= ppi->size) {
 +                      netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
 +                      continue;
 +              }
 +
                if (ppi->type == type && ppi->internal == internal)
                        return (void *)((ulong)ppi + ppi->ppi_offset);
                len -= ppi->size;
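For orientation, the area these checks bound starts per_pkt_info_offset bytes into the rndis_packet and holds a chain of variable-size PPI entries, each roughly of this shape (layout recalled from the driver's rndis definitions, so treat it as an approximation):

	struct rndis_per_packet_info {
		u32 size;		/* total bytes in this PPI entry */
		u32 type:31;		/* IEEE_8021Q_INFO, NBL_HASH_VALUE, ... */
		u32 internal:1;		/* set for Microsoft-internal PPI types */
		u32 ppi_offset;		/* payload offset from entry start */
	};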
@@@ -424,29 -387,14 +423,29 @@@ static int rndis_filter_receive_data(st
        const struct ndis_pkt_8021q_info *vlan;
        const struct rndis_pktinfo_id *pktinfo_id;
        const u32 *hash_info;
 -      u32 data_offset;
 +      u32 data_offset, rpkt_len;
        void *data;
        bool rsc_more = false;
        int ret;
  
 +      /* Ensure data_buflen is big enough to read header fields */
 +      if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
 +              netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
 +                         data_buflen);
 +              return NVSP_STAT_FAIL;
 +      }
 +
 +      /* Validate rndis_pkt offset */
 +      if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
 +              netdev_err(ndev, "invalid rndis packet offset: %u\n",
 +                         rndis_pkt->data_offset);
 +              return NVSP_STAT_FAIL;
 +      }
 +
        /* Remove the rndis header and pass it back up the stack */
        data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
  
 +      rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
        data_buflen -= data_offset;
  
        /*
                return NVSP_STAT_FAIL;
        }
  
 -      vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0);
 +      vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
  
 -      csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
 +      csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
  
 -      hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
 +      hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
  
 -      pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
 +      pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
  
        data = (void *)msg + data_offset;
  
@@@ -525,14 -473,6 +524,14 @@@ int rndis_filter_receive(struct net_dev
        if (netif_msg_rx_status(net_device_ctx))
                dump_rndis_message(ndev, rndis_msg);
  
 +      /* Validate incoming rndis_message packet */
 +      if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
 +          buflen < rndis_msg->msg_len) {
 +              netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
 +                         buflen, rndis_msg->msg_len);
 +              return NVSP_STAT_FAIL;
 +      }
 +
        switch (rndis_msg->ndis_msg_type) {
        case RNDIS_MSG_PACKET:
                return rndis_filter_receive_data(ndev, net_dev, nvchan,
diff --combined drivers/pci/controller/pci-hyperv.c
@@@ -1531,8 -1531,16 +1531,8 @@@ static struct irq_chip hv_msi_irq_chip 
        .irq_unmask             = hv_irq_unmask,
  };
  
 -static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
 -                                                 msi_alloc_info_t *arg)
 -{
 -      return arg->msi_hwirq;
 -}
 -
  static struct msi_domain_ops hv_msi_ops = {
 -      .get_hwirq      = hv_msi_domain_ops_get_hwirq,
        .msi_prepare    = pci_msi_prepare,
 -      .set_desc       = pci_msi_set_desc,
        .msi_free       = hv_msi_free,
  };
  
@@@ -2507,7 -2515,10 +2507,10 @@@ static void hv_pci_onchannelcallback(vo
  
  /**
   * hv_pci_protocol_negotiation() - Set up protocol
-  * @hdev:     VMBus's tracking struct for this root PCI bus
+  * @hdev:             VMBus's tracking struct for this root PCI bus.
+  * @version:          Array of supported channel protocol versions in
+  *                    the order of probing - highest go first.
+  * @num_version:      Number of elements in the version array.
   *
   * This driver is intended to support running on Windows 10
   * (server) and later versions. It will not run on earlier