staging: hv: move netvsc_send_recv_completion() to clean up forward declaration
drivers/staging/hv/netvsc.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/delay.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30 #include "hv_api.h"
31 #include "logging.h"
32 #include "netvsc.h"
33 #include "rndis_filter.h"
34 #include "channel.h"
35
36
37 /* Globals */
38 static const char *driver_name = "netvsc";
39
40 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
41 static const struct hv_guid netvsc_device_type = {
42         .data = {
43                 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
44                 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
45         }
46 };
47
48 static void netvsc_channel_cb(void *context);
49
50 static int netvsc_init_send_buf(struct hv_device *device);
51
52 static int netvsc_init_recv_buf(struct hv_device *device);
53
54 static int netvsc_destroy_send_buf(struct netvsc_device *net_device);
55
56 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device);
57
58 static int netvsc_connect_vsp(struct hv_device *device);
59
60 static void netvsc_send_completion(struct hv_device *device,
61                                    struct vmpacket_descriptor *packet);
62
63 static void netvsc_receive(struct hv_device *device,
64                             struct vmpacket_descriptor *packet);
65
66
67 static struct netvsc_device *alloc_net_device(struct hv_device *device)
68 {
69         struct netvsc_device *net_device;
70
71         net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
72         if (!net_device)
73                 return NULL;
74
75         /* Set to 2 to allow both inbound and outbound traffic */
76         atomic_cmpxchg(&net_device->refcnt, 0, 2);
77
78         net_device->dev = device;
79         device->ext = net_device;
80
81         return net_device;
82 }
83
84 static void free_net_device(struct netvsc_device *device)
85 {
86         WARN_ON(atomic_read(&device->refcnt) != 0);
87         device->dev->ext = NULL;
88         kfree(device);
89 }
90
91
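/*
 * Reference counting scheme: a freshly allocated net device starts at 2
 * (outbound and inbound paths both enabled).  Outbound users require
 * refcnt > 1, inbound users require refcnt > 0; the release_*_net_device()
 * helpers below busy-wait the count back down to 1 and then 0 on teardown.
 */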
92 /* Get the net device object iff it exists and its refcount > 1 */
93 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
94 {
95         struct netvsc_device *net_device;
96
97         net_device = device->ext;
98         if (net_device && atomic_read(&net_device->refcnt) > 1)
99                 atomic_inc(&net_device->refcnt);
100         else
101                 net_device = NULL;
102
103         return net_device;
104 }
105
106 /* Get the net device object iff it exists and its refcount > 0 */
107 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
108 {
109         struct netvsc_device *net_device;
110
111         net_device = device->ext;
112         if (net_device && atomic_read(&net_device->refcnt))
113                 atomic_inc(&net_device->refcnt);
114         else
115                 net_device = NULL;
116
117         return net_device;
118 }
119
120 static void put_net_device(struct hv_device *device)
121 {
122         struct netvsc_device *net_device;
123
124         net_device = device->ext;
125
126         atomic_dec(&net_device->refcnt);
127 }
128
129 static struct netvsc_device *release_outbound_net_device(
130                 struct hv_device *device)
131 {
132         struct netvsc_device *net_device;
133
134         net_device = device->ext;
135         if (net_device == NULL)
136                 return NULL;
137
138         /* Busy wait until the ref drops to 2, then set it to 1 */
139         while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
140                 udelay(100);
141
142         return net_device;
143 }
144
145 static struct netvsc_device *release_inbound_net_device(
146                 struct hv_device *device)
147 {
148         struct netvsc_device *net_device;
149
150         net_device = device->ext;
151         if (net_device == NULL)
152                 return NULL;
153
154         /* Busy wait until the ref drops to 1, then set it to 0 */
155         while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
156                 udelay(100);
157
158         device->ext = NULL;
159         return net_device;
160 }
161
162 static int netvsc_init_recv_buf(struct hv_device *device)
163 {
164         int ret = 0;
165         struct netvsc_device *net_device;
166         struct nvsp_message *init_packet;
167
168         net_device = get_outbound_net_device(device);
169         if (!net_device) {
170                 dev_err(&device->device, "unable to get net device..."
171                            "device being destroyed?");
172                 return -1;
173         }
174
175         net_device->recv_buf =
176                 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
177                                 get_order(net_device->recv_buf_size));
178         if (!net_device->recv_buf) {
179                 dev_err(&device->device, "unable to allocate receive "
180                         "buffer of size %d", net_device->recv_buf_size);
181                 ret = -1;
182                 goto cleanup;
183         }
184
185         /*
186          * Establish the gpadl handle for this buffer on this
187          * channel.  Note: This call uses the vmbus connection rather
188          * than the channel to establish the gpadl handle.
189          */
190         ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
191                                     net_device->recv_buf_size,
192                                     &net_device->recv_buf_gpadl_handle);
193         if (ret != 0) {
194                 dev_err(&device->device,
195                         "unable to establish receive buffer's gpadl");
196                 goto cleanup;
197         }
198
199
200         /* Notify the NetVsp of the gpadl handle */
201         init_packet = &net_device->channel_init_pkt;
202
203         memset(init_packet, 0, sizeof(struct nvsp_message));
204
205         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
206         init_packet->msg.v1_msg.send_recv_buf.
207                 gpadl_handle = net_device->recv_buf_gpadl_handle;
208         init_packet->msg.v1_msg.
209                 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
210
211         /* Send the gpadl notification request */
212         net_device->wait_condition = 0;
213         ret = vmbus_sendpacket(device->channel, init_packet,
214                                sizeof(struct nvsp_message),
215                                (unsigned long)init_packet,
216                                VM_PKT_DATA_INBAND,
217                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
218         if (ret != 0) {
219                 dev_err(&device->device,
220                         "unable to send receive buffer's gpadl to netvsp");
221                 goto cleanup;
222         }
223
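        /*
         * Wait (up to 1 second) for the SEND_RECV_BUF_COMPLETE response;
         * netvsc_send_completion() copies it into channel_init_pkt and
         * sets wait_condition before waking us up.
         */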
224         wait_event_timeout(net_device->channel_init_wait,
225                         net_device->wait_condition,
226                         msecs_to_jiffies(1000));
227         BUG_ON(net_device->wait_condition == 0);
228
229
230         /* Check the response */
231         if (init_packet->msg.v1_msg.
232             send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
233                 dev_err(&device->device, "Unable to complete receive buffer "
234                            "initialization with NetVsp - status %d",
235                            init_packet->msg.v1_msg.
236                            send_recv_buf_complete.status);
237                 ret = -1;
238                 goto cleanup;
239         }
240
241         /* Parse the response */
242
243         net_device->recv_section_cnt = init_packet->msg.
244                 v1_msg.send_recv_buf_complete.num_sections;
245
246         net_device->recv_section = kmalloc(net_device->recv_section_cnt
247                 * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
248         if (net_device->recv_section == NULL) {
249                 ret = -1;
250                 goto cleanup;
251         }
252
253         memcpy(net_device->recv_section,
254                 init_packet->msg.v1_msg.
255                send_recv_buf_complete.sections,
256                 net_device->recv_section_cnt *
257                sizeof(struct nvsp_1_receive_buffer_section));
258
259         /*
260          * For 1st release, there should only be 1 section that represents the
261          * entire receive buffer
262          */
263         if (net_device->recv_section_cnt != 1 ||
264             net_device->recv_section->offset != 0) {
265                 ret = -1;
266                 goto cleanup;
267         }
268
269         goto exit;
270
271 cleanup:
272         netvsc_destroy_recv_buf(net_device);
273
274 exit:
275         put_net_device(device);
276         return ret;
277 }
278
279 static int netvsc_init_send_buf(struct hv_device *device)
280 {
281         int ret = 0;
282         struct netvsc_device *net_device;
283         struct nvsp_message *init_packet;
284
285         net_device = get_outbound_net_device(device);
286         if (!net_device) {
287                 dev_err(&device->device, "unable to get net device..."
288                            "device being destroyed?");
289                 return -1;
290         }
291         if (net_device->send_buf_size <= 0) {
292                 ret = -EINVAL;
293                 goto cleanup;
294         }
295
296         net_device->send_buf =
297                 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
298                                 get_order(net_device->send_buf_size));
299         if (!net_device->send_buf) {
300                 dev_err(&device->device, "unable to allocate send "
301                         "buffer of size %d", net_device->send_buf_size);
302                 ret = -1;
303                 goto cleanup;
304         }
305
306         /*
307          * Establish the gpadl handle for this buffer on this
308          * channel.  Note: This call uses the vmbus connection rather
309          * than the channel to establish the gpadl handle.
310          */
311         ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
312                                     net_device->send_buf_size,
313                                     &net_device->send_buf_gpadl_handle);
314         if (ret != 0) {
315                 dev_err(&device->device, "unable to establish send buffer's gpadl");
316                 goto cleanup;
317         }
318
319         /* Notify the NetVsp of the gpadl handle */
320         init_packet = &net_device->channel_init_pkt;
321
322         memset(init_packet, 0, sizeof(struct nvsp_message));
323
324         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
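        /*
         * Note: the send-buffer request reuses the v1_msg.send_recv_buf
         * fields of the message union for its gpadl handle and buffer id.
         */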
325         init_packet->msg.v1_msg.send_recv_buf.
326                 gpadl_handle = net_device->send_buf_gpadl_handle;
327         init_packet->msg.v1_msg.send_recv_buf.id =
328                 NETVSC_SEND_BUFFER_ID;
329
330         /* Send the gpadl notification request */
331         net_device->wait_condition = 0;
332         ret = vmbus_sendpacket(device->channel, init_packet,
333                                sizeof(struct nvsp_message),
334                                (unsigned long)init_packet,
335                                VM_PKT_DATA_INBAND,
336                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
337         if (ret != 0) {
338                 dev_err(&device->device,
339                            "unable to send send buffer's gpadl to netvsp");
340                 goto cleanup;
341         }
342
343         wait_event_timeout(net_device->channel_init_wait,
344                         net_device->wait_condition,
345                         msecs_to_jiffies(1000));
346         BUG_ON(net_device->wait_condition == 0);
347
348         /* Check the response */
349         if (init_packet->msg.v1_msg.
350             send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
351                 dev_err(&device->device, "Unable to complete send buffer "
352                            "initialization with NetVsp - status %d",
353                            init_packet->msg.v1_msg.
354                            send_send_buf_complete.status);
355                 ret = -1;
356                 goto cleanup;
357         }
358
359         net_device->send_section_size = init_packet->
360                 msg.v1_msg.send_send_buf_complete.section_size;
361
362         goto exit;
363
364 cleanup:
365         netvsc_destroy_send_buf(net_device);
366
367 exit:
368         put_net_device(device);
369         return ret;
370 }
371
372 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
373 {
374         struct nvsp_message *revoke_packet;
375         int ret = 0;
376
377         /*
378          * If we got a section count, it means we received a
379          * SendReceiveBufferComplete msg (ie sent
380          * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
381          * to send a revoke msg here
382          */
383         if (net_device->recv_section_cnt) {
384                 /* Send the revoke receive buffer */
385                 revoke_packet = &net_device->revoke_packet;
386                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
387
388                 revoke_packet->hdr.msg_type =
389                         NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
390                 revoke_packet->msg.v1_msg.
391                         revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
392
393                 ret = vmbus_sendpacket(net_device->dev->channel,
394                                        revoke_packet,
395                                        sizeof(struct nvsp_message),
396                                        (unsigned long)revoke_packet,
397                                        VM_PKT_DATA_INBAND, 0);
398                 /*
399                  * If we failed here, we might as well return and
400                  * have a leak rather than continue and a bugchk
401                  */
402                 if (ret != 0) {
403                         dev_err(&net_device->dev->device, "unable to send "
404                                 "revoke receive buffer to netvsp");
405                         return -1;
406                 }
407         }
408
409         /* Teardown the gpadl on the vsp end */
410         if (net_device->recv_buf_gpadl_handle) {
411                 ret = vmbus_teardown_gpadl(net_device->dev->channel,
412                            net_device->recv_buf_gpadl_handle);
413
414                 /* If we failed here, we might as well return and have a leak rather than continue and a bugchk */
415                 if (ret != 0) {
416                         dev_err(&net_device->dev->device,
417                                    "unable to teardown receive buffer's gpadl");
418                         return -1;
419                 }
420                 net_device->recv_buf_gpadl_handle = 0;
421         }
422
423         if (net_device->recv_buf) {
424                 /* Free up the receive buffer */
425                 free_pages((unsigned long)net_device->recv_buf,
426                         get_order(net_device->recv_buf_size));
427                 net_device->recv_buf = NULL;
428         }
429
430         if (net_device->recv_section) {
431                 net_device->recv_section_cnt = 0;
432                 kfree(net_device->recv_section);
433                 net_device->recv_section = NULL;
434         }
435
436         return ret;
437 }
438
439 static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
440 {
441         struct nvsp_message *revoke_packet;
442         int ret = 0;
443
444         /*
445          * If we got a section size, it means we received a
446          *  SendSendBufferComplete msg (ie sent
447          *  NvspMessage1TypeSendSendBuffer msg) therefore, we need
448          *  to send a revoke msg here
449          */
450         if (net_device->send_section_size) {
451                 /* Send the revoke send buffer */
452                 revoke_packet = &net_device->revoke_packet;
453                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
454
455                 revoke_packet->hdr.msg_type =
456                         NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
457                 revoke_packet->msg.v1_msg.
458                         revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;
459
460                 ret = vmbus_sendpacket(net_device->dev->channel,
461                                        revoke_packet,
462                                        sizeof(struct nvsp_message),
463                                        (unsigned long)revoke_packet,
464                                        VM_PKT_DATA_INBAND, 0);
465                 /*
466                  * If we failed here, we might as well return and have a leak
467                  * rather than continue and a bugchk
468                  */
469                 if (ret != 0) {
470                         dev_err(&net_device->dev->device, "unable to send "
471                                 "revoke send buffer to netvsp");
472                         return -1;
473                 }
474         }
475
476         /* Teardown the gpadl on the vsp end */
477         if (net_device->send_buf_gpadl_handle) {
478                 ret = vmbus_teardown_gpadl(net_device->dev->channel,
479                                            net_device->send_buf_gpadl_handle);
480
481                 /*
482                  * If we failed here, we might as well return and have a leak
483                  * rather than continue and a bugchk
484                  */
485                 if (ret != 0) {
486                         dev_err(&net_device->dev->device,
487                                 "unable to teardown send buffer's gpadl");
488                         return -1;
489                 }
490                 net_device->send_buf_gpadl_handle = 0;
491         }
492
493         if (net_device->send_buf) {
494                 /* Free up the send buffer */
495                 free_pages((unsigned long)net_device->send_buf,
496                                 get_order(net_device->send_buf_size));
497                 net_device->send_buf = NULL;
498         }
499
500         return ret;
501 }
502
503
504 static int netvsc_connect_vsp(struct hv_device *device)
505 {
506         int ret;
507         struct netvsc_device *net_device;
508         struct nvsp_message *init_packet;
509         int ndis_version;
510
511         net_device = get_outbound_net_device(device);
512         if (!net_device) {
513                 dev_err(&device->device, "unable to get net device..."
514                            "device being destroyed?");
515                 return -1;
516         }
517
518         init_packet = &net_device->channel_init_pkt;
519
520         memset(init_packet, 0, sizeof(struct nvsp_message));
521         init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
522         init_packet->msg.init_msg.init.min_protocol_ver =
523                 NVSP_MIN_PROTOCOL_VERSION;
524         init_packet->msg.init_msg.init.max_protocol_ver =
525                 NVSP_MAX_PROTOCOL_VERSION;
526
527         /* Send the init request */
528         net_device->wait_condition = 0;
529         ret = vmbus_sendpacket(device->channel, init_packet,
530                                sizeof(struct nvsp_message),
531                                (unsigned long)init_packet,
532                                VM_PKT_DATA_INBAND,
533                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
534
535         if (ret != 0)
536                 goto cleanup;
537
538         wait_event_timeout(net_device->channel_init_wait,
539                         net_device->wait_condition,
540                         msecs_to_jiffies(1000));
541         if (net_device->wait_condition == 0) {
542                 ret = -ETIMEDOUT;
543                 goto cleanup;
544         }
545
546         if (init_packet->msg.init_msg.init_complete.status !=
547             NVSP_STAT_SUCCESS) {
548                 ret = -1;
549                 goto cleanup;
550         }
551
552         if (init_packet->msg.init_msg.init_complete.
553             negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
554                 ret = -1;
555                 goto cleanup;
556         }
557         /* Send the ndis version */
558         memset(init_packet, 0, sizeof(struct nvsp_message));
559
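        /* NDIS 5.0: major version in the high 16 bits, minor in the low 16 */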
560         ndis_version = 0x00050000;
561
562         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
563         init_packet->msg.v1_msg.
564                 send_ndis_ver.ndis_major_ver =
565                                 (ndis_version & 0xFFFF0000) >> 16;
566         init_packet->msg.v1_msg.
567                 send_ndis_ver.ndis_minor_ver =
568                                 ndis_version & 0xFFFF;
569
570         /* Send the init request */
571         ret = vmbus_sendpacket(device->channel, init_packet,
572                                 sizeof(struct nvsp_message),
573                                 (unsigned long)init_packet,
574                                 VM_PKT_DATA_INBAND, 0);
575         if (ret != 0) {
576                 ret = -1;
577                 goto cleanup;
578         }
579
580         /* Post the big receive buffer to NetVSP */
581         ret = netvsc_init_recv_buf(device);
582         if (ret == 0)
583                 ret = netvsc_init_send_buf(device);
584
585 cleanup:
586         put_net_device(device);
587         return ret;
588 }
589
590 static void netvsc_disconnect_from_vsp(struct netvsc_device *net_device)
591 {
592         netvsc_destroy_recv_buf(net_device);
593         netvsc_destroy_send_buf(net_device);
594 }
595
596 /*
597  * netvsc_device_add - Callback when the device belonging to this
598  * driver is added
599  */
600 static int netvsc_device_add(struct hv_device *device, void *additional_info)
601 {
602         int ret = 0;
603         int i;
604         struct netvsc_device *net_device;
605         struct hv_netvsc_packet *packet, *pos;
606         struct netvsc_driver *net_driver =
607                                 (struct netvsc_driver *)device->drv;
608
609         net_device = alloc_net_device(device);
610         if (!net_device) {
611                 ret = -1;
612                 goto cleanup;
613         }
614
615         /* Initialize the NetVSC channel extension */
616         net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
617         spin_lock_init(&net_device->recv_pkt_list_lock);
618
619         net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
620
621         INIT_LIST_HEAD(&net_device->recv_pkt_list);
622
623         for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
624                 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
625                                  (NETVSC_RECEIVE_SG_COUNT *
626                                   sizeof(struct hv_page_buffer)), GFP_KERNEL);
627                 if (!packet)
628                         break;
629
630                 list_add_tail(&packet->list_ent,
631                               &net_device->recv_pkt_list);
632         }
633         init_waitqueue_head(&net_device->channel_init_wait);
634
635         /* Open the channel */
636         ret = vmbus_open(device->channel, net_driver->ring_buf_size,
637                          net_driver->ring_buf_size, NULL, 0,
638                          netvsc_channel_cb, device);
639
640         if (ret != 0) {
641                 dev_err(&device->device, "unable to open channel: %d", ret);
642                 ret = -1;
643                 goto cleanup;
644         }
645
646         /* Channel is opened */
647         pr_info("hv_netvsc channel opened successfully");
648
649         /* Connect with the NetVsp */
650         ret = netvsc_connect_vsp(device);
651         if (ret != 0) {
652                 dev_err(&device->device,
653                         "unable to connect to NetVSP - %d", ret);
654                 ret = -1;
655                 goto close;
656         }
657
658         return ret;
659
660 close:
661         /* Now, we can close the channel safely */
662         vmbus_close(device->channel);
663
664 cleanup:
665
666         if (net_device) {
667                 list_for_each_entry_safe(packet, pos,
668                                          &net_device->recv_pkt_list,
669                                          list_ent) {
670                         list_del(&packet->list_ent);
671                         kfree(packet);
672                 }
673
674                 release_outbound_net_device(device);
675                 release_inbound_net_device(device);
676
677                 free_net_device(net_device);
678         }
679
680         return ret;
681 }
682
683 /*
684  * netvsc_device_remove - Callback when the root bus device is removed
685  */
686 static int netvsc_device_remove(struct hv_device *device)
687 {
688         struct netvsc_device *net_device;
689         struct hv_netvsc_packet *netvsc_packet, *pos;
690
691         /* Stop outbound traffic, ie sends and receive completions */
692         net_device = release_outbound_net_device(device);
693         if (!net_device) {
694                 dev_err(&device->device, "No net device present!!");
695                 return -1;
696         }
697
698         /* Wait for all send completions */
699         while (atomic_read(&net_device->num_outstanding_sends)) {
700                 dev_err(&device->device,
701                         "waiting for %d requests to complete...",
702                         atomic_read(&net_device->num_outstanding_sends));
703                 udelay(100);
704         }
705
706         netvsc_disconnect_from_vsp(net_device);
707
708         /* Stop inbound traffic, ie receives and send completions */
709         net_device = release_inbound_net_device(device);
710
711         /* At this point, no one should be accessing net_device except in here */
712         dev_notice(&device->device, "net device safe to remove");
713
714         /* Now, we can close the channel safely */
715         vmbus_close(device->channel);
716
717         /* Release all resources */
718         list_for_each_entry_safe(netvsc_packet, pos,
719                                  &net_device->recv_pkt_list, list_ent) {
720                 list_del(&netvsc_packet->list_ent);
721                 kfree(netvsc_packet);
722         }
723
724         free_net_device(net_device);
725         return 0;
726 }
727
728 /*
729  * netvsc_cleanup - Perform any cleanup when the driver is removed
730  */
731 static void netvsc_cleanup(struct hv_driver *drv)
732 {
733 }
734
735 static void netvsc_send_completion(struct hv_device *device,
736                                    struct vmpacket_descriptor *packet)
737 {
738         struct netvsc_device *net_device;
739         struct nvsp_message *nvsp_packet;
740         struct hv_netvsc_packet *nvsc_packet;
741
742         net_device = get_inbound_net_device(device);
743         if (!net_device) {
744                 dev_err(&device->device, "unable to get net device..."
745                            "device being destroyed?");
746                 return;
747         }
748
749         nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
750                         (packet->offset8 << 3));
751
752         if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
753             (nvsp_packet->hdr.msg_type ==
754              NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
755             (nvsp_packet->hdr.msg_type ==
756              NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
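                /*
                 * Responses to the channel-init requests issued from
                 * netvsc_connect_vsp(), netvsc_init_recv_buf() and
                 * netvsc_init_send_buf(); hand the reply back to the
                 * waiter blocked on channel_init_wait.
                 */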
757                 /* Copy the response back */
758                 memcpy(&net_device->channel_init_pkt, nvsp_packet,
759                        sizeof(struct nvsp_message));
760                 net_device->wait_condition = 1;
761                 wake_up(&net_device->channel_init_wait);
762         } else if (nvsp_packet->hdr.msg_type ==
763                    NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
764                 /* Get the send context */
765                 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
766                         packet->trans_id;
767
768                 /* Notify the layer above us */
769                 nvsc_packet->completion.send.send_completion(
770                         nvsc_packet->completion.send.send_completion_ctx);
771
772                 atomic_dec(&net_device->num_outstanding_sends);
773         } else {
774                 dev_err(&device->device, "Unknown send completion packet type - "
775                            "%d received!!", nvsp_packet->hdr.msg_type);
776         }
777
778         put_net_device(device);
779 }
780
781 static int netvsc_send(struct hv_device *device,
782                         struct hv_netvsc_packet *packet)
783 {
784         struct netvsc_device *net_device;
785         int ret = 0;
786
787         struct nvsp_message sendMessage;
788
789         net_device = get_outbound_net_device(device);
790         if (!net_device) {
791                 dev_err(&device->device, "net device (%p) shutting down..."
792                            "ignoring outbound packets", net_device);
793                 return -2;
794         }
795
796         sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
797         if (packet->is_data_pkt) {
798                 /* 0 is RMC_DATA; */
799                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
800         } else {
801                 /* 1 is RMC_CONTROL; */
802                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
803         }
804
805         /* Not using send buffer section */
806         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
807                 0xFFFFFFFF;
808         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
809
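        /*
         * The payload itself is passed as GPA page ranges (packet->page_buf)
         * below rather than being copied into the pre-allocated send buffer,
         * hence the "no section" index above.
         */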
810         if (packet->page_buf_cnt) {
811                 ret = vmbus_sendpacket_pagebuffer(device->channel,
812                                                   packet->page_buf,
813                                                   packet->page_buf_cnt,
814                                                   &sendMessage,
815                                                   sizeof(struct nvsp_message),
816                                                   (unsigned long)packet);
817         } else {
818                 ret = vmbus_sendpacket(device->channel, &sendMessage,
819                                        sizeof(struct nvsp_message),
820                                        (unsigned long)packet,
821                                        VM_PKT_DATA_INBAND,
822                                        VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
823
824         }
825
826         if (ret != 0)
827                 dev_err(&device->device, "Unable to send packet %p ret %d",
828                            packet, ret);
829
830         atomic_inc(&net_device->num_outstanding_sends);
831         put_net_device(device);
832         return ret;
833 }
834
835 static void netvsc_send_recv_completion(struct hv_device *device,
836                                         u64 transaction_id)
837 {
838         struct nvsp_message recvcompMessage;
839         int retries = 0;
840         int ret;
841
842         recvcompMessage.hdr.msg_type =
843                                 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
844
845         /* FIXME: Pass in the status */
846         recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
847                 NVSP_STAT_SUCCESS;
848
849 retry_send_cmplt:
850         /* Send the completion */
851         ret = vmbus_sendpacket(device->channel, &recvcompMessage,
852                                sizeof(struct nvsp_message), transaction_id,
853                                VM_PKT_COMP, 0);
854         if (ret == 0) {
855                 /* success */
856                 /* no-op */
857         } else if (ret == -1) {
858                 /* no more room...wait a bit and attempt to retry 3 times */
859                 retries++;
860                 dev_err(&device->device, "unable to send receive completion pkt"
861                         " (tid %llx)...retrying %d", transaction_id, retries);
862
863                 if (retries < 4) {
864                         udelay(100);
865                         goto retry_send_cmplt;
866                 } else {
867                         dev_err(&device->device, "unable to send receive "
868                                 "completion pkt (tid %llx)...give up retrying",
869                                 transaction_id);
870                 }
871         } else {
872                 dev_err(&device->device, "unable to send receive "
873                         "completion pkt - %llx", transaction_id);
874         }
875 }
876
877 /* Return a completed receive packet; send a receive completion to the RNDIS device (ie NetVsp) once its xfer page packet is done */
878 static void netvsc_receive_completion(void *context)
879 {
880         struct hv_netvsc_packet *packet = context;
881         struct hv_device *device = (struct hv_device *)packet->device;
882         struct netvsc_device *net_device;
883         u64 transaction_id = 0;
884         bool fsend_receive_comp = false;
885         unsigned long flags;
886
887         /*
888          * Even though it seems logical to do a get_outbound_net_device() here to
889          * send out the receive completion, we are using get_inbound_net_device()
890          * since we may have disabled outbound traffic already.
891          */
892         net_device = get_inbound_net_device(device);
893         if (!net_device) {
894                 dev_err(&device->device, "unable to get net device..."
895                            "device being destroyed?");
896                 return;
897         }
898
899         /* Overloading use of the lock: it also protects xfer_page_pkt->count here. */
900         spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
901
902         packet->xfer_page_pkt->count--;
903
904         /*
905          * Last one in the line that represent 1 xfer page packet.
906          * Return the xfer page packet itself to the freelist
907          */
908         if (packet->xfer_page_pkt->count == 0) {
909                 fsend_receive_comp = true;
910                 transaction_id = packet->completion.recv.recv_completion_tid;
911                 list_add_tail(&packet->xfer_page_pkt->list_ent,
912                               &net_device->recv_pkt_list);
913
914         }
915
916         /* Put the packet back */
917         list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
918         spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
919
920         /* Send a receive completion for the xfer page packet */
921         if (fsend_receive_comp)
922                 netvsc_send_recv_completion(device, transaction_id);
923
924         put_net_device(device);
925 }
926
927 static void netvsc_receive(struct hv_device *device,
928                             struct vmpacket_descriptor *packet)
929 {
930         struct netvsc_device *net_device;
931         struct vmtransfer_page_packet_header *vmxferpage_packet;
932         struct nvsp_message *nvsp_packet;
933         struct hv_netvsc_packet *netvsc_packet = NULL;
934         unsigned long start;
935         unsigned long end, end_virtual;
936         /* struct netvsc_driver *netvscDriver; */
937         struct xferpage_packet *xferpage_packet = NULL;
938         int i, j;
939         int count = 0, bytes_remain = 0;
940         unsigned long flags;
941         LIST_HEAD(listHead);
942
943         net_device = get_inbound_net_device(device);
944         if (!net_device) {
945                 dev_err(&device->device, "unable to get net device..."
946                            "device being destroyed?");
947                 return;
948         }
949
950         /*
951          * All inbound packets other than send completion should be xfer page
952          * packet
953          */
954         if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
955                 dev_err(&device->device, "Unknown packet type received - %d",
956                            packet->type);
957                 put_net_device(device);
958                 return;
959         }
960
961         nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
962                         (packet->offset8 << 3));
963
964         /* Make sure this is a valid nvsp packet */
965         if (nvsp_packet->hdr.msg_type !=
966             NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
967                 dev_err(&device->device, "Unknown nvsp packet type received-"
968                         " %d", nvsp_packet->hdr.msg_type);
969                 put_net_device(device);
970                 return;
971         }
972
973         vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
974
975         if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
976                 dev_err(&device->device, "Invalid xfer page set id - "
977                            "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
978                            vmxferpage_packet->xfer_pageset_id);
979                 put_net_device(device);
980                 return;
981         }
982
983         /*
984          * Grab free packets (range count + 1) to represent this xfer
985          * page packet. +1 to represent the xfer page packet itself.
986          * We grab it here so that we know exactly how many we can
987          * fulfil
988          */
989         spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
990         while (!list_empty(&net_device->recv_pkt_list)) {
991                 list_move_tail(net_device->recv_pkt_list.next, &listHead);
992                 if (++count == vmxferpage_packet->range_cnt + 1)
993                         break;
994         }
995         spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
996
997         /*
998          * We need at least 2 netvsc pkts (1 to represent the xfer
999          * page and at least 1 for the range) i.e. so we can handle
1000          * some of the xfer page packet ranges...
1001          */
1002         if (count < 2) {
1003                 dev_err(&device->device, "Got only %d netvsc pkt...needed "
1004                         "%d pkts. Dropping this xfer page packet completely!",
1005                         count, vmxferpage_packet->range_cnt + 1);
1006
1007                 /* Return it to the freelist */
1008                 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
1009                 for (i = count; i != 0; i--) {
1010                         list_move_tail(listHead.next,
1011                                        &net_device->recv_pkt_list);
1012                 }
1013                 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
1014                                        flags);
1015
1016                 netvsc_send_recv_completion(device,
1017                                             vmxferpage_packet->d.trans_id);
1018
1019                 put_net_device(device);
1020                 return;
1021         }
1022
1023         /* Remove the 1st packet to represent the xfer page packet itself */
1024         xferpage_packet = (struct xferpage_packet *)listHead.next;
1025         list_del(&xferpage_packet->list_ent);
1026
1027         /* This is how much we can satisfy */
1028         xferpage_packet->count = count - 1;
1029
1030         if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
1031                 dev_err(&device->device, "Needed %d netvsc pkts to satisfy "
1032                         "this xfer page...got %d",
1033                         vmxferpage_packet->range_cnt, xferpage_packet->count);
1034         }
1035
1036         /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1037         for (i = 0; i < (count - 1); i++) {
1038                 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
1039                 list_del(&netvsc_packet->list_ent);
1040
1041                 /* Initialize the netvsc packet */
1042                 netvsc_packet->xfer_page_pkt = xferpage_packet;
1043                 netvsc_packet->completion.recv.recv_completion =
1044                                         netvsc_receive_completion;
1045                 netvsc_packet->completion.recv.recv_completion_ctx =
1046                                         netvsc_packet;
1047                 netvsc_packet->device = device;
1048                 /* Save this so that we can send it back */
1049                 netvsc_packet->completion.recv.recv_completion_tid =
1050                                         vmxferpage_packet->d.trans_id;
1051
1052                 netvsc_packet->total_data_buflen =
1053                                         vmxferpage_packet->ranges[i].byte_count;
1054                 netvsc_packet->page_buf_cnt = 1;
1055
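                /*
                 * Describe the range with a single page buffer entry first;
                 * if the range crosses a page boundary it is split into
                 * additional page_buf entries further down.
                 */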
1056                 netvsc_packet->page_buf[0].len =
1057                                         vmxferpage_packet->ranges[i].byte_count;
1058
1059                 start = virt_to_phys((void *)((unsigned long)net_device->
1060                                 recv_buf + vmxferpage_packet->ranges[i].byte_offset));
1061
1062                 netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
1063                 end_virtual = (unsigned long)net_device->recv_buf
1064                     + vmxferpage_packet->ranges[i].byte_offset
1065                     + vmxferpage_packet->ranges[i].byte_count - 1;
1066                 end = virt_to_phys((void *)end_virtual);
1067
1068                 /* Calculate the page relative offset */
1069                 netvsc_packet->page_buf[0].offset =
1070                         vmxferpage_packet->ranges[i].byte_offset &
1071                         (PAGE_SIZE - 1);
1072                 if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
1073                         /* Handle frame across multiple pages: */
1074                         netvsc_packet->page_buf[0].len =
1075                                 (netvsc_packet->page_buf[0].pfn <<
1076                                  PAGE_SHIFT)
1077                                 + PAGE_SIZE - start;
1078                         bytes_remain = netvsc_packet->total_data_buflen -
1079                                         netvsc_packet->page_buf[0].len;
1080                         for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
1081                                 netvsc_packet->page_buf[j].offset = 0;
1082                                 if (bytes_remain <= PAGE_SIZE) {
1083                                         netvsc_packet->page_buf[j].len =
1084                                                 bytes_remain;
1085                                         bytes_remain = 0;
1086                                 } else {
1087                                         netvsc_packet->page_buf[j].len =
1088                                                 PAGE_SIZE;
1089                                         bytes_remain -= PAGE_SIZE;
1090                                 }
1091                                 netvsc_packet->page_buf[j].pfn =
1092                                     virt_to_phys((void *)(end_virtual -
1093                                                 bytes_remain)) >> PAGE_SHIFT;
1094                                 netvsc_packet->page_buf_cnt++;
1095                                 if (bytes_remain == 0)
1096                                         break;
1097                         }
1098                 }
1099
1100                 /* Pass it to the upper layer */
1101                 ((struct netvsc_driver *)device->drv)->
1102                         recv_cb(device, netvsc_packet);
1103
1104                 netvsc_receive_completion(netvsc_packet->
1105                                 completion.recv.recv_completion_ctx);
1106         }
1107
1108         put_net_device(device);
1109 }
1110
1111 static void netvsc_channel_cb(void *context)
1112 {
1113         int ret;
1114         struct hv_device *device = context;
1115         struct netvsc_device *net_device;
1116         u32 bytes_recvd;
1117         u64 request_id;
1118         unsigned char *packet;
1119         struct vmpacket_descriptor *desc;
1120         unsigned char *buffer;
1121         int bufferlen = NETVSC_PACKET_SIZE;
1122
1123         packet = kzalloc(NETVSC_PACKET_SIZE, GFP_ATOMIC);
1125         if (!packet)
1126                 return;
1127         buffer = packet;
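        /*
         * 'buffer' normally aliases the fixed-size 'packet' allocation;
         * when vmbus_recvpacket_raw() reports a larger packet, a bigger
         * buffer is kmalloc'd further down and released again once that
         * packet has been handled.
         */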
1128
1129         net_device = get_inbound_net_device(device);
1130         if (!net_device) {
1131                 dev_err(&device->device, "net device (%p) shutting down..."
1132                            "ignoring inbound packets", net_device);
1133                 goto out;
1134         }
1135
1136         do {
1137                 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
1138                                            &bytes_recvd, &request_id);
1139                 if (ret == 0) {
1140                         if (bytes_recvd > 0) {
1141                                 desc = (struct vmpacket_descriptor *)buffer;
1142                                 switch (desc->type) {
1143                                 case VM_PKT_COMP:
1144                                         netvsc_send_completion(device, desc);
1145                                         break;
1146
1147                                 case VM_PKT_DATA_USING_XFER_PAGES:
1148                                         netvsc_receive(device, desc);
1149                                         break;
1150
1151                                 default:
1152                                         dev_err(&device->device,
1153                                                    "unhandled packet type %d, "
1154                                                    "tid %llx len %d\n",
1155                                                    desc->type, request_id,
1156                                                    bytes_recvd);
1157                                         break;
1158                                 }
1159
1160                                 /* reset */
1161                                 if (bufferlen > NETVSC_PACKET_SIZE) {
1162                                         kfree(buffer);
1163                                         buffer = packet;
1164                                         bufferlen = NETVSC_PACKET_SIZE;
1165                                 }
1166                         } else {
1167                                 /* reset */
1168                                 if (bufferlen > NETVSC_PACKET_SIZE) {
1169                                         kfree(buffer);
1170                                         buffer = packet;
1171                                         bufferlen = NETVSC_PACKET_SIZE;
1172                                 }
1173
1174                                 break;
1175                         }
1176                 } else if (ret == -2) {
1177                         /* Handle large packet */
1178                         buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1179                         if (buffer == NULL) {
1180                                 /* Try again next time around */
1181                                 dev_err(&device->device,
1182                                            "unable to allocate buffer of size "
1183                                            "(%d)!!", bytes_recvd);
                                /* don't leak 'packet' via the final kfree() */
                                buffer = packet;
1184                                 break;
1185                         }
1186
1187                         bufferlen = bytes_recvd;
1188                 }
1189         } while (1);
1190
1191         put_net_device(device);
1192 out:
1193         kfree(buffer);
1194         return;
1195 }
1196
1197 /*
1198  * netvsc_initialize - Main entry point
1199  */
1200 int netvsc_initialize(struct hv_driver *drv)
1201 {
1202         struct netvsc_driver *driver = (struct netvsc_driver *)drv;
1203
1204         drv->name = driver_name;
1205         memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));
1206
1207         /* Setup the dispatch table */
1208         driver->base.dev_add    = netvsc_device_add;
1209         driver->base.dev_rm     = netvsc_device_remove;
1210         driver->base.cleanup            = netvsc_cleanup;
1211
1212         driver->send                    = netvsc_send;
1213
1214         rndis_filter_init(driver);
1215         return 0;
1216 }