usb: wusbcore: allow wa_xfer_destroy to clean up partially constructed xfers
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / usb / wusbcore / wa-xfer.c
1 /*
2  * WUSB Wire Adapter
3  * Data transfer and URB enqueueing
4  *
5  * Copyright (C) 2005-2006 Intel Corporation
6  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  *
23  * How transfers work: get a buffer, break it up into segments (segment
24  * size is a multiple of the maxpacket size). For each segment issue a
25  * segment request (struct wa_xfer_*), then send the data buffer if
26  * out or nothing if in (all over the DTO endpoint).
27  *
28  * For each submitted segment request, a notification will come over
29  * the NEP endpoint and a transfer result (struct xfer_result) will
30  * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31  * data coming (inbound transfer), schedule a read and handle it.
32  *
33  * Sounds simple, it is a pain to implement.
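 *
 *   For example (illustrative numbers only): a 10000 byte bulk OUT URB
 *   on an rpipe whose segment size works out to 3072 bytes is split
 *   into DIV_ROUND_UP(10000, 3072) = 4 segments of 3072, 3072, 3072
 *   and 784 bytes; each segment request goes out over DTO followed by
 *   its chunk of data, and its result comes back through the NEP
 *   notification / DTI read path.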
34  *
35  *
36  * ENTRY POINTS
37  *
38  *   FIXME
39  *
40  * LIFE CYCLE / STATE DIAGRAM
41  *
42  *   FIXME
43  *
44  * THIS CODE IS DISGUSTING
45  *
46  *   Warned you are; it's my second try and still not happy with it.
47  *
48  * NOTES:
49  *
50  *   - No iso
51  *
52  *   - Supports DMA xfers, control, bulk and maybe interrupt
53  *
54  *   - Does not recycle unused rpipes
55  *
56  *     An rpipe is assigned to an endpoint the first time it is used,
57  *     and then it's there, assigned, until the endpoint is disabled
58  *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59  *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60  *     (should be a mutex).
61  *
62  *     Two ways it could be done:
63  *
64  *     (a) set up a timer every time an rpipe's use count drops to 1
65  *         (which means it is unused) or when a transfer ends. Reset
66  *         the timer when an xfer is queued. If the timer expires,
67  *         release the rpipe [see rpipe_ep_disable() and the
68  *         illustrative sketch after this comment block].
69  *
70  *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
71  *         and none are found, go over the list and take any rpipe
72  *         whose last-xfer-done timestamp is more than x seconds old.
73  *
74  *     However, because we have a set of limited resources
75  *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
76  *     blocks-per-rpipe, rpipes-per-host), in the end we are going
77  *     to have to rebuild all this around a scheduler, where we keep
78  *     a list of transactions to do and, based on the availability
79  *     of the different required components (blocks, rpipes, segment
80  *     slots, etc.), we schedule them. Painful.
81  */
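
/*
 * Illustrative sketch only (not part of this driver): method (a) above
 * could be implemented with a per-rpipe idle timer.  The 'idle_timer'
 * field and the 5 second timeout are assumptions made up for this
 * example; rpipe_ep_disable() is the existing release path.
 *
 *	// re-arm whenever the rpipe goes idle or a transfer on it ends
 *	mod_timer(&rpipe->idle_timer, jiffies + msecs_to_jiffies(5000));
 *
 *	// cancel whenever a new xfer is queued on the rpipe
 *	del_timer(&rpipe->idle_timer);
 *
 *	// timer handler: if the rpipe is still unused, release it
 *	static void rpipe_idle_timeout(unsigned long _rpipe)
 *	{
 *		struct wa_rpipe *rpipe = (void *)_rpipe;
 *
 *		rpipe_ep_disable(rpipe->wa, rpipe->ep);
 *	}
 */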
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/slab.h>
85 #include <linux/hash.h>
86 #include <linux/ratelimit.h>
87 #include <linux/export.h>
88 #include <linux/scatterlist.h>
89
90 #include "wa-hc.h"
91 #include "wusbhc.h"
92
93 enum {
94         WA_SEGS_MAX = 255,
95 };
96
97 enum wa_seg_status {
98         WA_SEG_NOTREADY,
99         WA_SEG_READY,
100         WA_SEG_DELAYED,
101         WA_SEG_SUBMITTED,
102         WA_SEG_PENDING,
103         WA_SEG_DTI_PENDING,
104         WA_SEG_DONE,
105         WA_SEG_ERROR,
106         WA_SEG_ABORTED,
107 };
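
/*
 * Typical error-free progression of a segment's status:
 *
 *   WA_SEG_NOTREADY -> WA_SEG_READY -> WA_SEG_SUBMITTED
 *     -> WA_SEG_PENDING -> [WA_SEG_DTI_PENDING, inbound data reads]
 *     -> WA_SEG_DONE
 *
 * A segment that cannot be submitted right away (no rpipe slots free)
 * parks in WA_SEG_DELAYED until wa_xfer_delayed_run() picks it up;
 * failures and dequeues move it to WA_SEG_ERROR or WA_SEG_ABORTED.
 */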
108
109 static void wa_xfer_delayed_run(struct wa_rpipe *);
110
111 /*
112  * Life cycle governed by 'struct urb' (the refcount of the struct is
113  * that of the 'struct urb' and usb_free_urb() would free the whole
114  * struct).
115  */
116 struct wa_seg {
117         struct urb tr_urb;              /* transfer request urb. */
118         struct urb *dto_urb;            /* for data output. */
119         struct list_head list_node;     /* for rpipe->req_list */
120         struct wa_xfer *xfer;           /* out xfer */
121         u8 index;                       /* which segment we are */
122         enum wa_seg_status status;
123         ssize_t result;                 /* bytes xfered or error */
124         struct wa_xfer_hdr xfer_hdr;
125         u8 xfer_extra[];                /* xtra space for xfer_hdr_ctl */
126 };
127
128 static inline void wa_seg_init(struct wa_seg *seg)
129 {
130         usb_init_urb(&seg->tr_urb);
131
132         /* set the remaining memory to 0. */
133         memset(((void *)seg) + sizeof(seg->tr_urb), 0,
134                 sizeof(*seg) - sizeof(seg->tr_urb));
135 }
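
/*
 * A sketch of why this layout works (not a verbatim call chain): since
 * tr_urb is the first member of struct wa_seg, dropping the last
 * reference on it frees the whole segment:
 *
 *	usb_free_urb(&seg->tr_urb)
 *	  -> urb refcount hits zero -> kfree(&seg->tr_urb) == kfree(seg)
 *
 * That is why wa_xfer_destroy() below only needs to free dto_urb (and
 * its SG list) explicitly before putting tr_urb.
 */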
136
137 /*
138  * Protected by xfer->lock
139  *
140  */
141 struct wa_xfer {
142         struct kref refcnt;
143         struct list_head list_node;
144         spinlock_t lock;
145         u32 id;
146
147         struct wahc *wa;                /* Wire adapter we are plugged to */
148         struct usb_host_endpoint *ep;
149         struct urb *urb;                /* URB we are transferring for */
150         struct wa_seg **seg;            /* transfer segments */
151         u8 segs, segs_submitted, segs_done;
152         unsigned is_inbound:1;
153         unsigned is_dma:1;
154         size_t seg_size;
155         int result;
156
157         gfp_t gfp;                      /* allocation mask */
158
159         struct wusb_dev *wusb_dev;      /* for activity timestamps */
160 };
161
162 static inline void wa_xfer_init(struct wa_xfer *xfer)
163 {
164         kref_init(&xfer->refcnt);
165         INIT_LIST_HEAD(&xfer->list_node);
166         spin_lock_init(&xfer->lock);
167 }
168
169 /*
170  * Destroy a transfer structure
171  *
172  * Note that dropping the last reference on xfer->seg[cnt]->tr_urb also
173  * frees the containing xfer->seg[cnt] memory allocated by __wa_xfer_setup_segs().
174  */
175 static void wa_xfer_destroy(struct kref *_xfer)
176 {
177         struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
178         if (xfer->seg) {
179                 unsigned cnt;
180                 for (cnt = 0; cnt < xfer->segs; cnt++) {
181                         if (xfer->seg[cnt]) {
182                                 if (xfer->seg[cnt]->dto_urb) {
183                                         kfree(xfer->seg[cnt]->dto_urb->sg);
184                                         usb_free_urb(xfer->seg[cnt]->dto_urb);
185                                 }
186                                 usb_free_urb(&xfer->seg[cnt]->tr_urb);
187                         }
188                 }
189                 kfree(xfer->seg);
190         }
191         kfree(xfer);
192 }
193
194 static void wa_xfer_get(struct wa_xfer *xfer)
195 {
196         kref_get(&xfer->refcnt);
197 }
198
199 static void wa_xfer_put(struct wa_xfer *xfer)
200 {
201         kref_put(&xfer->refcnt, wa_xfer_destroy);
202 }
203
204 /*
205  * xfer is referenced
206  *
207  * xfer->lock has to be unlocked
208  *
209  * We take xfer->lock for setting the result; this is a barrier
210  * against drivers/usb/core/hcd.c:unlink1() being called after we call
211  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
212  * reference to the transfer.
213  */
214 static void wa_xfer_giveback(struct wa_xfer *xfer)
215 {
216         unsigned long flags;
217
218         spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
219         list_del_init(&xfer->list_node);
220         spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
221         /* FIXME: segmentation broken -- kills DWA */
222         wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
223         wa_put(xfer->wa);
224         wa_xfer_put(xfer);
225 }
226
227 /*
228  * xfer is referenced
229  *
230  * xfer->lock has to be unlocked
231  */
232 static void wa_xfer_completion(struct wa_xfer *xfer)
233 {
234         if (xfer->wusb_dev)
235                 wusb_dev_put(xfer->wusb_dev);
236         rpipe_put(xfer->ep->hcpriv);
237         wa_xfer_giveback(xfer);
238 }
239
240 /*
241  * If transfer is done, wrap it up and return true
242  *
243  * xfer->lock has to be locked
244  */
245 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
246 {
247         struct device *dev = &xfer->wa->usb_iface->dev;
248         unsigned result, cnt;
249         struct wa_seg *seg;
250         struct urb *urb = xfer->urb;
251         unsigned found_short = 0;
252
253         result = xfer->segs_done == xfer->segs_submitted;
254         if (result == 0)
255                 goto out;
256         urb->actual_length = 0;
257         for (cnt = 0; cnt < xfer->segs; cnt++) {
258                 seg = xfer->seg[cnt];
259                 switch (seg->status) {
260                 case WA_SEG_DONE:
261                         if (found_short && seg->result > 0) {
262                                 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
263                                         xfer, cnt, seg->result);
264                                 urb->status = -EINVAL;
265                                 goto out;
266                         }
267                         urb->actual_length += seg->result;
268                         if (seg->result < xfer->seg_size
269                             && cnt != xfer->segs-1)
270                                 found_short = 1;
271                         dev_dbg(dev, "xfer %p#%u: DONE short %d "
272                                 "result %zu urb->actual_length %d\n",
273                                 xfer, seg->index, found_short, seg->result,
274                                 urb->actual_length);
275                         break;
276                 case WA_SEG_ERROR:
277                         xfer->result = seg->result;
278                         dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
279                                 xfer, seg->index, seg->result);
280                         goto out;
281                 case WA_SEG_ABORTED:
282                         dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
283                                 xfer, seg->index, urb->status);
284                         xfer->result = urb->status;
285                         goto out;
286                 default:
287                         dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
288                                  xfer, cnt, seg->status);
289                         xfer->result = -EINVAL;
290                         goto out;
291                 }
292         }
293         xfer->result = 0;
294 out:
295         return result;
296 }
297
298 /*
299  * Initialize a transfer's ID
300  *
301  * We need to use a sequential number; if we use the pointer or the
302  * hash of the pointer, it can repeat over sequential transfers and
303  * then it will confuse the HWA... which makes you wonder why they
304  * put a 32 bit handle in there in the first place.
305  */
306 static void wa_xfer_id_init(struct wa_xfer *xfer)
307 {
308         xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
309 }
310
311 /*
312  * Return the ID associated with this xfer
313  *
314  * The ID is generated by wa_xfer_id_init().
315  */
316 static u32 wa_xfer_id(struct wa_xfer *xfer)
317 {
318         return xfer->id;
319 }
320
321 /*
322  * Search the wire adapter's transfer list for the transfer with the
323  * given ID and take a reference on it.
324  *
325  * The ID is the sequential value assigned by wa_xfer_id_init().
326  *
327  * @returns the referenced transfer, or NULL if not found.
328  */
329 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
330 {
331         unsigned long flags;
332         struct wa_xfer *xfer_itr;
333         spin_lock_irqsave(&wa->xfer_list_lock, flags);
334         list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
335                 if (id == xfer_itr->id) {
336                         wa_xfer_get(xfer_itr);
337                         goto out;
338                 }
339         }
340         xfer_itr = NULL;
341 out:
342         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
343         return xfer_itr;
344 }
345
346 struct wa_xfer_abort_buffer {
347         struct urb urb;
348         struct wa_xfer_abort cmd;
349 };
350
351 static void __wa_xfer_abort_cb(struct urb *urb)
352 {
353         struct wa_xfer_abort_buffer *b = urb->context;
354         usb_put_urb(&b->urb);
355 }
356
357 /*
358  * Aborts an ongoing transaction
359  *
360  * Assumes the transfer is referenced and locked and in a submitted
361  * state (mainly that there is an endpoint/rpipe assigned).
362  *
363  * The callback (see above) does nothing but free up the data by
364  * putting the URB. Because the URB is allocated at the head of the
365  * struct, the whole space we allocated is kfreed when its refcount drops.
366  *
367  * We'll get an 'aborted transaction' xfer result on DTI, which we'll
368  * politely ignore because at this point the transaction has already
369  * been marked as aborted.
370  */
371 static void __wa_xfer_abort(struct wa_xfer *xfer)
372 {
373         int result;
374         struct device *dev = &xfer->wa->usb_iface->dev;
375         struct wa_xfer_abort_buffer *b;
376         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
377
378         b = kmalloc(sizeof(*b), GFP_ATOMIC);
379         if (b == NULL)
380                 goto error_kmalloc;
381         b->cmd.bLength =  sizeof(b->cmd);
382         b->cmd.bRequestType = WA_XFER_ABORT;
383         b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
384         b->cmd.dwTransferID = wa_xfer_id(xfer);
385
386         usb_init_urb(&b->urb);
387         usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
388                 usb_sndbulkpipe(xfer->wa->usb_dev,
389                                 xfer->wa->dto_epd->bEndpointAddress),
390                 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
391         result = usb_submit_urb(&b->urb, GFP_ATOMIC);
392         if (result < 0)
393                 goto error_submit;
394         return;                         /* callback frees! */
395
396
397 error_submit:
398         if (printk_ratelimit())
399                 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
400                         xfer, result);
401         kfree(b);
402 error_kmalloc:
403         return;
404
405 }
406
407 /*
408  *
409  * @returns < 0 on error, transfer segment request size if ok
410  */
411 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
412                                      enum wa_xfer_type *pxfer_type)
413 {
414         ssize_t result;
415         struct device *dev = &xfer->wa->usb_iface->dev;
416         size_t maxpktsize;
417         struct urb *urb = xfer->urb;
418         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
419
420         switch (rpipe->descr.bmAttribute & 0x3) {
421         case USB_ENDPOINT_XFER_CONTROL:
422                 *pxfer_type = WA_XFER_TYPE_CTL;
423                 result = sizeof(struct wa_xfer_ctl);
424                 break;
425         case USB_ENDPOINT_XFER_INT:
426         case USB_ENDPOINT_XFER_BULK:
427                 *pxfer_type = WA_XFER_TYPE_BI;
428                 result = sizeof(struct wa_xfer_bi);
429                 break;
430         case USB_ENDPOINT_XFER_ISOC:
431                 dev_err(dev, "FIXME: ISOC not implemented\n");
432                 result = -ENOSYS;
433                 goto error;
434         default:
435                 /* never happens */
436                 BUG();
437                 result = -EINVAL;       /* shut gcc up */
438         }
439         xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
440         xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
441         xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
442                 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
443         /* Compute the segment size and make sure it is a multiple of
444          * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
445          * a check (FIXME) */
446         maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
447         if (xfer->seg_size < maxpktsize) {
448                 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
449                         "%zu\n", xfer->seg_size, maxpktsize);
450                 result = -EINVAL;
451                 goto error;
452         }
453         xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
454         xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
455         if (xfer->segs >= WA_SEGS_MAX) {
456                 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
457                         (int)(urb->transfer_buffer_length / xfer->seg_size),
458                         WA_SEGS_MAX);
459                 result = -EINVAL;
460                 goto error;
461         }
462         if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
463                 xfer->segs = 1;
464 error:
465         return result;
466 }
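
/*
 * Worked example for __wa_xfer_setup_sizes() (illustrative numbers
 * only): with wBlocks = 8 and bRPipeBlockSize = 6 the raw segment size
 * is 8 * (1 << 5) = 256 bytes; with wMaxPacketSize = 64 it stays 256
 * after rounding down to a multiple of the packet size, and a 1000
 * byte URB then needs DIV_ROUND_UP(1000, 256) = 4 segments.
 */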
467
468 /* Fill in the common request header and xfer-type specific data. */
469 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
470                                  struct wa_xfer_hdr *xfer_hdr0,
471                                  enum wa_xfer_type xfer_type,
472                                  size_t xfer_hdr_size)
473 {
474         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
475
476         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
477         xfer_hdr0->bLength = xfer_hdr_size;
478         xfer_hdr0->bRequestType = xfer_type;
479         xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
480         xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
481         xfer_hdr0->bTransferSegment = 0;
482         switch (xfer_type) {
483         case WA_XFER_TYPE_CTL: {
484                 struct wa_xfer_ctl *xfer_ctl =
485                         container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
486                 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
487                 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
488                        sizeof(xfer_ctl->baSetupData));
489                 break;
490         }
491         case WA_XFER_TYPE_BI:
492                 break;
493         case WA_XFER_TYPE_ISO:
494                 printk(KERN_ERR "FIXME: ISOC not implemented\n");
495         default:
496                 BUG();
497         }
498 }
499
500 /*
501  * Callback for the OUT data phase of the segment request
502  *
503  * Check wa_seg_tr_cb(); most comments also apply here because this
504  * function does almost the same thing and they work closely
505  * together.
506  *
507  * If the seg request has failed but this DTO phase has succeeded,
508  * wa_seg_tr_cb() has already failed the segment and moved the
509  * status to WA_SEG_ERROR, so this will go through 'case 0' and
510  * effectively do nothing.
511  */
512 static void wa_seg_dto_cb(struct urb *urb)
513 {
514         struct wa_seg *seg = urb->context;
515         struct wa_xfer *xfer = seg->xfer;
516         struct wahc *wa;
517         struct device *dev;
518         struct wa_rpipe *rpipe;
519         unsigned long flags;
520         unsigned rpipe_ready = 0;
521         u8 done = 0;
522
523         switch (urb->status) {
524         case 0:
525                 spin_lock_irqsave(&xfer->lock, flags);
526                 wa = xfer->wa;
527                 dev = &wa->usb_iface->dev;
528                 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
529                         xfer, seg->index, urb->actual_length);
530                 if (seg->status < WA_SEG_PENDING)
531                         seg->status = WA_SEG_PENDING;
532                 seg->result = urb->actual_length;
533                 spin_unlock_irqrestore(&xfer->lock, flags);
534                 break;
535         case -ECONNRESET:       /* URB unlinked; no need to do anything */
536         case -ENOENT:           /* as it was done by whoever unlinked us */
537                 break;
538         default:                /* Other errors ... */
539                 spin_lock_irqsave(&xfer->lock, flags);
540                 wa = xfer->wa;
541                 dev = &wa->usb_iface->dev;
542                 rpipe = xfer->ep->hcpriv;
543                 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
544                         xfer, seg->index, urb->status);
545                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
546                             EDC_ERROR_TIMEFRAME)){
547                         dev_err(dev, "DTO: URB max acceptable errors "
548                                 "exceeded, resetting device\n");
549                         wa_reset_all(wa);
550                 }
551                 if (seg->status != WA_SEG_ERROR) {
552                         seg->status = WA_SEG_ERROR;
553                         seg->result = urb->status;
554                         xfer->segs_done++;
555                         __wa_xfer_abort(xfer);
556                         rpipe_ready = rpipe_avail_inc(rpipe);
557                         done = __wa_xfer_is_done(xfer);
558                 }
559                 spin_unlock_irqrestore(&xfer->lock, flags);
560                 if (done)
561                         wa_xfer_completion(xfer);
562                 if (rpipe_ready)
563                         wa_xfer_delayed_run(rpipe);
564         }
565 }
566
567 /*
568  * Callback for the segment request
569  *
570  * If successful transition state (unless already transitioned or
571  * outbound transfer); otherwise, take a note of the error, mark this
572  * segment done and try completion.
573  *
574  * Note we don't access seg->xfer until we are sure that the transfer
575  * hasn't been cancelled (ECONNRESET, ENOENT), because in that case
576  * seg->xfer could already be gone.
577  *
578  * We have to check before setting the status to WA_SEG_PENDING
579  * because sometimes the xfer result callback arrives before this
580  * callback (geeeeeeze), so it might happen that we are already in
581  * another state. As well, we don't set it if the transfer is inbound,
582  * as in that case, wa_seg_dto_cb will do it when the OUT data phase
583  * finishes.
584  */
585 static void wa_seg_tr_cb(struct urb *urb)
586 {
587         struct wa_seg *seg = urb->context;
588         struct wa_xfer *xfer = seg->xfer;
589         struct wahc *wa;
590         struct device *dev;
591         struct wa_rpipe *rpipe;
592         unsigned long flags;
593         unsigned rpipe_ready;
594         u8 done = 0;
595
596         switch (urb->status) {
597         case 0:
598                 spin_lock_irqsave(&xfer->lock, flags);
599                 wa = xfer->wa;
600                 dev = &wa->usb_iface->dev;
601                 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
602                 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
603                         seg->status = WA_SEG_PENDING;
604                 spin_unlock_irqrestore(&xfer->lock, flags);
605                 break;
606         case -ECONNRESET:       /* URB unlinked; no need to do anything */
607         case -ENOENT:           /* as it was done by whoever unlinked us */
608                 break;
609         default:                /* Other errors ... */
610                 spin_lock_irqsave(&xfer->lock, flags);
611                 wa = xfer->wa;
612                 dev = &wa->usb_iface->dev;
613                 rpipe = xfer->ep->hcpriv;
614                 if (printk_ratelimit())
615                         dev_err(dev, "xfer %p#%u: request error %d\n",
616                                 xfer, seg->index, urb->status);
617                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
618                             EDC_ERROR_TIMEFRAME)){
619                         dev_err(dev, "DTO: URB max acceptable errors "
620                                 "exceeded, resetting device\n");
621                         wa_reset_all(wa);
622                 }
623                 usb_unlink_urb(seg->dto_urb);
624                 seg->status = WA_SEG_ERROR;
625                 seg->result = urb->status;
626                 xfer->segs_done++;
627                 __wa_xfer_abort(xfer);
628                 rpipe_ready = rpipe_avail_inc(rpipe);
629                 done = __wa_xfer_is_done(xfer);
630                 spin_unlock_irqrestore(&xfer->lock, flags);
631                 if (done)
632                         wa_xfer_completion(xfer);
633                 if (rpipe_ready)
634                         wa_xfer_delayed_run(rpipe);
635         }
636 }
637
638 /* allocate an SG list to store bytes_to_transfer bytes and copy the
639  * subset of the in_sg that matches the buffer subset
640  * we are about to transfer. */
641 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
642         const unsigned int bytes_transferred,
643         const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
644 {
645         struct scatterlist *out_sg;
646         unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
647                 nents;
648         struct scatterlist *current_xfer_sg = in_sg;
649         struct scatterlist *current_seg_sg, *last_seg_sg;
650
651         /* skip previously transferred pages. */
652         while ((current_xfer_sg) &&
653                         (bytes_processed < bytes_transferred)) {
654                 bytes_processed += current_xfer_sg->length;
655
656                 /* advance the sg if current segment starts on or past the
657                         next page. */
658                 if (bytes_processed <= bytes_transferred)
659                         current_xfer_sg = sg_next(current_xfer_sg);
660         }
661
662         /* the data for the current segment starts in current_xfer_sg.
663                 calculate the offset. */
664         if (bytes_processed > bytes_transferred) {
665                 offset_into_current_page_data = current_xfer_sg->length -
666                         (bytes_processed - bytes_transferred);
667         }
668
669         /* calculate the number of pages needed by this segment. */
670         nents = DIV_ROUND_UP((bytes_to_transfer +
671                 offset_into_current_page_data +
672                 current_xfer_sg->offset),
673                 PAGE_SIZE);
674
675         out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
676         if (out_sg) {
677                 sg_init_table(out_sg, nents);
678
679                 /* copy the portion of the incoming SG that correlates to the
680                  * data to be transferred by this segment to the segment SG. */
681                 last_seg_sg = current_seg_sg = out_sg;
682                 bytes_processed = 0;
683
684                 /* reset nents and calculate the actual number of sg entries
685                         needed. */
686                 nents = 0;
687                 while ((bytes_processed < bytes_to_transfer) &&
688                                 current_seg_sg && current_xfer_sg) {
689                         unsigned int page_len = min((current_xfer_sg->length -
690                                 offset_into_current_page_data),
691                                 (bytes_to_transfer - bytes_processed));
692
693                         sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
694                                 page_len,
695                                 current_xfer_sg->offset +
696                                 offset_into_current_page_data);
697
698                         bytes_processed += page_len;
699
700                         last_seg_sg = current_seg_sg;
701                         current_seg_sg = sg_next(current_seg_sg);
702                         current_xfer_sg = sg_next(current_xfer_sg);
703
704                         /* only the first page may require additional offset. */
705                         offset_into_current_page_data = 0;
706                         nents++;
707                 }
708
709                 /* update num_sgs and terminate the list since we may have
710                  *  concatenated pages. */
711                 sg_mark_end(last_seg_sg);
712                 *out_num_sgs = nents;
713         }
714
715         return out_sg;
716 }
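
/*
 * Worked example for wa_xfer_create_subset_sg() (illustrative numbers
 * only): with in_sg made of full 4096 byte pages at offset 0,
 * bytes_transferred = 5000 and bytes_to_transfer = 3000, the skip loop
 * stops in the second page with offset_into_current_page_data =
 * 4096 - (8192 - 5000) = 904, nents = DIV_ROUND_UP(3000 + 904, 4096)
 * = 1, and the single output entry maps 3000 bytes starting at offset
 * 904 of that page.
 */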
717
718 /*
719  * Allocate the segs array and initialize each of them
720  *
721  * The segments are freed by wa_xfer_destroy() when the xfer use count
722  * drops to zero; however, because each segment is given the same life
723  * cycle as the USB URB it contains, it is actually freed by
724  * usb_put_urb() on the contained USB URB (twisted, eh?).
725  */
726 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
727 {
728         int result, cnt;
729         size_t alloc_size = sizeof(*xfer->seg[0])
730                 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
731         struct usb_device *usb_dev = xfer->wa->usb_dev;
732         const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
733         struct wa_seg *seg;
734         size_t buf_itr, buf_size, buf_itr_size;
735
736         result = -ENOMEM;
737         xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
738         if (xfer->seg == NULL)
739                 goto error_segs_kzalloc;
740         buf_itr = 0;
741         buf_size = xfer->urb->transfer_buffer_length;
742         for (cnt = 0; cnt < xfer->segs; cnt++) {
743                 seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
744                 if (seg == NULL)
745                         goto error_seg_kmalloc;
746                 wa_seg_init(seg);
747                 seg->xfer = xfer;
748                 seg->index = cnt;
749                 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
750                                   usb_sndbulkpipe(usb_dev,
751                                                   dto_epd->bEndpointAddress),
752                                   &seg->xfer_hdr, xfer_hdr_size,
753                                   wa_seg_tr_cb, seg);
754                 buf_itr_size = min(buf_size, xfer->seg_size);
755                 if (xfer->is_inbound == 0 && buf_size > 0) {
756                         /* outbound data. */
757                         seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
758                         if (seg->dto_urb == NULL)
759                                 goto error_dto_alloc;
760                         usb_fill_bulk_urb(
761                                 seg->dto_urb, usb_dev,
762                                 usb_sndbulkpipe(usb_dev,
763                                                 dto_epd->bEndpointAddress),
764                                 NULL, 0, wa_seg_dto_cb, seg);
765                         if (xfer->is_dma) {
766                                 seg->dto_urb->transfer_dma =
767                                         xfer->urb->transfer_dma + buf_itr;
768                                 seg->dto_urb->transfer_flags |=
769                                         URB_NO_TRANSFER_DMA_MAP;
770                                 seg->dto_urb->transfer_buffer = NULL;
771                                 seg->dto_urb->sg = NULL;
772                                 seg->dto_urb->num_sgs = 0;
773                         } else {
774                                 /* do buffer or SG processing. */
775                                 seg->dto_urb->transfer_flags &=
776                                         ~URB_NO_TRANSFER_DMA_MAP;
777                                 /* this should always be 0 before a resubmit. */
778                                 seg->dto_urb->num_mapped_sgs = 0;
779
780                                 if (xfer->urb->transfer_buffer) {
781                                         seg->dto_urb->transfer_buffer =
782                                                 xfer->urb->transfer_buffer +
783                                                 buf_itr;
784                                         seg->dto_urb->sg = NULL;
785                                         seg->dto_urb->num_sgs = 0;
786                                 } else {
787                                         /* allocate an SG list to store seg_size
788                                             bytes and copy the subset of the
789                                             xfer->urb->sg that matches the
790                                             buffer subset we are about to read.
791                                         */
792                                         seg->dto_urb->sg =
793                                                 wa_xfer_create_subset_sg(
794                                                 xfer->urb->sg,
795                                                 buf_itr, buf_itr_size,
796                                                 &(seg->dto_urb->num_sgs));
797
798                                         if (!(seg->dto_urb->sg)) {
799                                                 seg->dto_urb->num_sgs   = 0;
800                                                 goto error_sg_alloc;
801                                         }
802
803                                         seg->dto_urb->transfer_buffer = NULL;
804                                 }
805                         }
806                         seg->dto_urb->transfer_buffer_length = buf_itr_size;
807                 }
808                 seg->status = WA_SEG_READY;
809                 buf_itr += buf_itr_size;
810                 buf_size -= buf_itr_size;
811         }
812         return 0;
813
814 error_sg_alloc:
815         usb_free_urb(xfer->seg[cnt]->dto_urb);
816 error_dto_alloc:
817         kfree(xfer->seg[cnt]);
818         cnt--;
819 error_seg_kmalloc:
820         /* use the fact that cnt is left at where it failed */
821         for (; cnt >= 0; cnt--) {
822                 if (xfer->seg[cnt] && xfer->is_inbound == 0) {
823                         kfree(xfer->seg[cnt]->dto_urb->sg);
824                         usb_free_urb(xfer->seg[cnt]->dto_urb);
825                 }
826                 kfree(xfer->seg[cnt]);
827         }
828 error_segs_kzalloc:
829         return result;
830 }
831
832 /*
833  * Allocates all the stuff needed to submit a transfer
834  *
835  * Breaks the whole data buffer in a list of segments, each one has a
836  * structure allocated to it and linked in xfer->seg[index]
837  *
838  * FIXME: merge setup_segs() and the last part of this function, no
839  *        need to do two for loops when we could run everything in a
840  *        single one
841  */
842 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
843 {
844         int result;
845         struct device *dev = &xfer->wa->usb_iface->dev;
846         enum wa_xfer_type xfer_type = 0; /* shut up GCC */
847         size_t xfer_hdr_size, cnt, transfer_size;
848         struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
849
850         result = __wa_xfer_setup_sizes(xfer, &xfer_type);
851         if (result < 0)
852                 goto error_setup_sizes;
853         xfer_hdr_size = result;
854         result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
855         if (result < 0) {
856                 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
857                         xfer, xfer->segs, result);
858                 goto error_setup_segs;
859         }
860         /* Fill the first header */
861         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
862         wa_xfer_id_init(xfer);
863         __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
864
865         /* Fill the remaining headers */
866         xfer_hdr = xfer_hdr0;
867         transfer_size = urb->transfer_buffer_length;
868         xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
869                 cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
870         transfer_size -=  xfer->seg_size;
871         for (cnt = 1; cnt < xfer->segs; cnt++) {
872                 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
873                 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
874                 xfer_hdr->bTransferSegment = cnt;
875                 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
876                         cpu_to_le32(xfer->seg_size)
877                         : cpu_to_le32(transfer_size);
878                 xfer->seg[cnt]->status = WA_SEG_READY;
879                 transfer_size -=  xfer->seg_size;
880         }
881         xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
882         result = 0;
883 error_setup_segs:
884 error_setup_sizes:
885         return result;
886 }
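
/*
 * Illustrative result of __wa_xfer_setup() (numbers made up): a 5000
 * byte OUT transfer with seg_size = 3072 ends up with two headers:
 * segment 0 with dwTransferLength = 3072 and bTransferSegment = 0, and
 * segment 1 with dwTransferLength = 1928 and bTransferSegment =
 * 1 | 0x80 (the 0x80 flag marks the last segment).
 */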
887
888 /*
889  *
890  *
891  * rpipe->seg_lock is held!
892  */
893 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
894                            struct wa_seg *seg)
895 {
896         int result;
897         /* submit the transfer request. */
898         result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
899         if (result < 0) {
900                 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
901                        xfer, seg->index, result);
902                 goto error_seg_submit;
903         }
904         /* submit the out data if this is an out request. */
905         if (seg->dto_urb) {
906                 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
907                 if (result < 0) {
908                         printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
909                                xfer, seg->index, result);
910                         goto error_dto_submit;
911                 }
912         }
913         seg->status = WA_SEG_SUBMITTED;
914         rpipe_avail_dec(rpipe);
915         return 0;
916
917 error_dto_submit:
918         usb_unlink_urb(&seg->tr_urb);
919 error_seg_submit:
920         seg->status = WA_SEG_ERROR;
921         seg->result = result;
922         return result;
923 }
924
925 /*
926  * Execute more queued request segments until the maximum concurrent allowed
927  *
928  * The ugly unlock/lock sequence on the error path is needed as the
929  * xfer->lock normally nests the seg_lock and not vice versa.
930  *
931  */
932 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
933 {
934         int result;
935         struct device *dev = &rpipe->wa->usb_iface->dev;
936         struct wa_seg *seg;
937         struct wa_xfer *xfer;
938         unsigned long flags;
939
940         spin_lock_irqsave(&rpipe->seg_lock, flags);
941         while (atomic_read(&rpipe->segs_available) > 0
942               && !list_empty(&rpipe->seg_list)) {
943                 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
944                                  list_node);
945                 list_del(&seg->list_node);
946                 xfer = seg->xfer;
947                 result = __wa_seg_submit(rpipe, xfer, seg);
948                 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
949                         xfer, seg->index, atomic_read(&rpipe->segs_available), result);
950                 if (unlikely(result < 0)) {
951                         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
952                         spin_lock_irqsave(&xfer->lock, flags);
953                         __wa_xfer_abort(xfer);
954                         xfer->segs_done++;
955                         spin_unlock_irqrestore(&xfer->lock, flags);
956                         spin_lock_irqsave(&rpipe->seg_lock, flags);
957                 }
958         }
959         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
960 }
961
962 /*
963  *
964  * xfer->lock is taken
965  *
966  * On submit failure we just stop submitting and return an error;
967  * wa_urb_enqueue_b() will execute the completion path.
968  */
969 static int __wa_xfer_submit(struct wa_xfer *xfer)
970 {
971         int result;
972         struct wahc *wa = xfer->wa;
973         struct device *dev = &wa->usb_iface->dev;
974         unsigned cnt;
975         struct wa_seg *seg;
976         unsigned long flags;
977         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
978         size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
979         u8 available;
980         u8 empty;
981
982         spin_lock_irqsave(&wa->xfer_list_lock, flags);
983         list_add_tail(&xfer->list_node, &wa->xfer_list);
984         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
985
986         BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
987         result = 0;
988         spin_lock_irqsave(&rpipe->seg_lock, flags);
989         for (cnt = 0; cnt < xfer->segs; cnt++) {
990                 available = atomic_read(&rpipe->segs_available);
991                 empty = list_empty(&rpipe->seg_list);
992                 seg = xfer->seg[cnt];
993                 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
994                         xfer, cnt, available, empty,
995                         available == 0 || !empty ? "delayed" : "submitted");
996                 if (available == 0 || !empty) {
997                         dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
998                         seg->status = WA_SEG_DELAYED;
999                         list_add_tail(&seg->list_node, &rpipe->seg_list);
1000                 } else {
1001                         result = __wa_seg_submit(rpipe, xfer, seg);
1002                         if (result < 0) {
1003                                 __wa_xfer_abort(xfer);
1004                                 goto error_seg_submit;
1005                         }
1006                 }
1007                 xfer->segs_submitted++;
1008         }
1009 error_seg_submit:
1010         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1011         return result;
1012 }
1013
1014 /*
1015  * Second part of a URB/transfer enqueue operation
1016  *
1017  * Assumes this comes from wa_urb_enqueue() [maybe through
1018  * wa_urb_enqueue_run()]. At this point:
1019  *
1020  * xfer->wa     filled and refcounted
1021  * xfer->ep     filled with rpipe refcounted if
1022  *              delayed == 0
1023  * xfer->urb    filled and refcounted (this is the case when called
1024  *              from wa_urb_enqueue() as we come from usb_submit_urb()
1025  *              and when called by wa_urb_enqueue_run(), as we took an
1026  *              extra ref dropped by _run() after we return).
1027  * xfer->gfp    filled
1028  *
1029  * If we fail at __wa_xfer_submit(), then we just check if we are done
1030  * and if so, we run the completion procedure. However, if we are not
1031  * yet done, we do nothing and wait for the completion handlers from
1032  * the submitted URBs or from the xfer-result path to kick in. If xfer
1033  * result never kicks in, the xfer will timeout from the USB code and
1034  * dequeue() will be called.
1035  */
1036 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1037 {
1038         int result;
1039         unsigned long flags;
1040         struct urb *urb = xfer->urb;
1041         struct wahc *wa = xfer->wa;
1042         struct wusbhc *wusbhc = wa->wusb;
1043         struct wusb_dev *wusb_dev;
1044         unsigned done;
1045
1046         result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1047         if (result < 0)
1048                 goto error_rpipe_get;
1049         result = -ENODEV;
1050         /* FIXME: segmentation broken -- kills DWA */
1051         mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
1052         if (urb->dev == NULL) {
1053                 mutex_unlock(&wusbhc->mutex);
1054                 goto error_dev_gone;
1055         }
1056         wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1057         if (wusb_dev == NULL) {
1058                 mutex_unlock(&wusbhc->mutex);
1059                 goto error_dev_gone;
1060         }
1061         mutex_unlock(&wusbhc->mutex);
1062
1063         spin_lock_irqsave(&xfer->lock, flags);
1064         xfer->wusb_dev = wusb_dev;
1065         result = urb->status;
1066         if (urb->status != -EINPROGRESS)
1067                 goto error_dequeued;
1068
1069         result = __wa_xfer_setup(xfer, urb);
1070         if (result < 0)
1071                 goto error_xfer_setup;
1072         result = __wa_xfer_submit(xfer);
1073         if (result < 0)
1074                 goto error_xfer_submit;
1075         spin_unlock_irqrestore(&xfer->lock, flags);
1076         return;
1077
1078         /* This is basically wa_xfer_completion() broken up.  wa_xfer_giveback()
1079          * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1080          * up / undo __wa_xfer_setup().
1081          */
1082 error_xfer_setup:
1083 error_dequeued:
1084         spin_unlock_irqrestore(&xfer->lock, flags);
1085         /* FIXME: segmentation broken, kills DWA */
1086         if (wusb_dev)
1087                 wusb_dev_put(wusb_dev);
1088 error_dev_gone:
1089         rpipe_put(xfer->ep->hcpriv);
1090 error_rpipe_get:
1091         xfer->result = result;
1092         wa_xfer_giveback(xfer);
1093         return;
1094
1095 error_xfer_submit:
1096         done = __wa_xfer_is_done(xfer);
1097         xfer->result = result;
1098         spin_unlock_irqrestore(&xfer->lock, flags);
1099         if (done)
1100                 wa_xfer_completion(xfer);
1101 }
1102
1103 /*
1104  * Execute the delayed transfers in the Wire Adapter @wa
1105  *
1106  * We need to be careful here, as dequeue() could be called in the
1107  * middle.  If dequeue() jumps in, it first locks xfer->lock and then
1108  * checks the list -- so, as we would be acquiring the locks in the
1109  * inverse order, we splice the delayed list onto a private list while
1110  * holding wa->xfer_list_lock and then submit the entries without the
1111  * list lock held.
1112  */
1113 void wa_urb_enqueue_run(struct work_struct *ws)
1114 {
1115         struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1116         struct wa_xfer *xfer, *next;
1117         struct urb *urb;
1118         LIST_HEAD(tmp_list);
1119
1120         /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1121         spin_lock_irq(&wa->xfer_list_lock);
1122         list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1123                         wa->xfer_delayed_list.prev);
1124         spin_unlock_irq(&wa->xfer_list_lock);
1125
1126         /*
1127          * enqueue from temp list without list lock held since wa_urb_enqueue_b
1128          * can take xfer->lock as well as lock mutexes.
1129          */
1130         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1131                 list_del_init(&xfer->list_node);
1132
1133                 urb = xfer->urb;
1134                 wa_urb_enqueue_b(xfer);
1135                 usb_put_urb(urb);       /* taken when queuing */
1136         }
1137 }
1138 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1139
1140 /*
1141  * Process the errored transfers on the Wire Adapter outside of interrupt.
1142  */
1143 void wa_process_errored_transfers_run(struct work_struct *ws)
1144 {
1145         struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1146         struct wa_xfer *xfer, *next;
1147         LIST_HEAD(tmp_list);
1148
1149         pr_info("%s: Run delayed STALL processing.\n", __func__);
1150
1151         /* Create a copy of the wa->xfer_errored_list while holding the lock */
1152         spin_lock_irq(&wa->xfer_list_lock);
1153         list_cut_position(&tmp_list, &wa->xfer_errored_list,
1154                         wa->xfer_errored_list.prev);
1155         spin_unlock_irq(&wa->xfer_list_lock);
1156
1157         /*
1158          * run rpipe_clear_feature_stalled from temp list without list lock
1159          * held.
1160          */
1161         list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1162                 struct usb_host_endpoint *ep;
1163                 unsigned long flags;
1164                 struct wa_rpipe *rpipe;
1165
1166                 spin_lock_irqsave(&xfer->lock, flags);
1167                 ep = xfer->ep;
1168                 rpipe = ep->hcpriv;
1169                 spin_unlock_irqrestore(&xfer->lock, flags);
1170
1171                 /* clear RPIPE feature stalled without holding a lock. */
1172                 rpipe_clear_feature_stalled(wa, ep);
1173
1174                 /* complete the xfer. This removes it from the tmp list. */
1175                 wa_xfer_completion(xfer);
1176
1177                 /* check for work. */
1178                 wa_xfer_delayed_run(rpipe);
1179         }
1180 }
1181 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1182
1183 /*
1184  * Submit a transfer to the Wire Adapter in a delayed way
1185  *
1186  * The enqueue process may sleep [see wa_urb_enqueue_b(): rpipe_get_by_ep()
1187  * and mutex_lock()]. If we are in an atomic context, we defer the
1188  * wa_urb_enqueue_b() call to a workqueue; otherwise we call it directly.
1189  *
1190  * @urb: We own a reference to it done by the HCI Linux USB stack that
1191  *       will be given up by calling usb_hcd_giveback_urb() or by
1192  *       returning error from this function -> ergo we don't have to
1193  *       refcount it.
1194  */
1195 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1196                    struct urb *urb, gfp_t gfp)
1197 {
1198         int result;
1199         struct device *dev = &wa->usb_iface->dev;
1200         struct wa_xfer *xfer;
1201         unsigned long my_flags;
1202         unsigned cant_sleep = irqs_disabled() | in_atomic();
1203
1204         if ((urb->transfer_buffer == NULL)
1205             && (urb->sg == NULL)
1206             && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1207             && urb->transfer_buffer_length != 0) {
1208                 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1209                 dump_stack();
1210         }
1211
1212         result = -ENOMEM;
1213         xfer = kzalloc(sizeof(*xfer), gfp);
1214         if (xfer == NULL)
1215                 goto error_kmalloc;
1216
1217         result = -ENOENT;
1218         if (urb->status != -EINPROGRESS)        /* cancelled */
1219                 goto error_dequeued;            /* before starting? */
1220         wa_xfer_init(xfer);
1221         xfer->wa = wa_get(wa);
1222         xfer->urb = urb;
1223         xfer->gfp = gfp;
1224         xfer->ep = ep;
1225         urb->hcpriv = xfer;
1226
1227         dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1228                 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1229                 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1230                 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1231                 cant_sleep ? "deferred" : "inline");
1232
1233         if (cant_sleep) {
1234                 usb_get_urb(urb);
1235                 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1236                 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1237                 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1238                 queue_work(wusbd, &wa->xfer_enqueue_work);
1239         } else {
1240                 wa_urb_enqueue_b(xfer);
1241         }
1242         return 0;
1243
1244 error_dequeued:
1245         kfree(xfer);
1246 error_kmalloc:
1247         return result;
1248 }
1249 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
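
/*
 * A minimal sketch of the expected caller, an HCD's .urb_enqueue
 * operation (the hwahc names are assumptions here; see the HWA host
 * driver for the real thing):
 *
 *	static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd,
 *					struct urb *urb, gfp_t gfp)
 *	{
 *		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 *		struct hwahc *hwahc = container_of(wusbhc, struct hwahc,
 *						   wusbhc);
 *
 *		return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
 *	}
 */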
1250
1251 /*
1252  * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1253  * handler] is called.
1254  *
1255  * Until a transfer has gone successfully through wa_urb_enqueue(),
1256  * dequeueing it must still run the completion; this covers transfers
1257  * stuck in the delayed list or dequeued before __wa_xfer_setup() runs.
1258  *
1259  *  not setup  If there is no hcpriv yet, that means that the enqueue
1260  *             still had no time to set the xfer up. Because
1261  *             urb->status should be other than -EINPROGRESS,
1262  *             enqueue() will catch that and bail out.
1263  *
1264  * If the transfer has gone through setup, we just need to clean it
1265  * up. If it has gone through submit(), we have to abort it [with an
1266  * async request] and then make sure we cancel each segment.
1267  *
1268  */
1269 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1270 {
1271         unsigned long flags, flags2;
1272         struct wa_xfer *xfer;
1273         struct wa_seg *seg;
1274         struct wa_rpipe *rpipe;
1275         unsigned cnt;
1276         unsigned rpipe_ready = 0;
1277
1278         xfer = urb->hcpriv;
1279         if (xfer == NULL) {
1280                 /*
1281                  * Nothing setup yet enqueue will see urb->status !=
1282                  * -EINPROGRESS (by hcd layer) and bail out with
1283                  * error, no need to do completion
1284                  */
1285                 BUG_ON(urb->status == -EINPROGRESS);
1286                 goto out;
1287         }
1288         spin_lock_irqsave(&xfer->lock, flags);
1289         rpipe = xfer->ep->hcpriv;
1290         if (rpipe == NULL) {
1291                 pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
1292                         __func__, wa_xfer_id(xfer),
1293                         "Probably already aborted.\n");
1294                 goto out_unlock;
1295         }
1296         /* Check the delayed list -> if there, release and complete */
1297         spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1298         if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1299                 goto dequeue_delayed;
1300         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1301         if (xfer->seg == NULL)          /* still hasn't reached */
1302                 goto out_unlock;        /* setup(), enqueue_b() completes */
1303         /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1304         __wa_xfer_abort(xfer);
1305         for (cnt = 0; cnt < xfer->segs; cnt++) {
1306                 seg = xfer->seg[cnt];
1307                 switch (seg->status) {
1308                 case WA_SEG_NOTREADY:
1309                 case WA_SEG_READY:
1310                         printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1311                                xfer, cnt, seg->status);
1312                         WARN_ON(1);
1313                         break;
1314                 case WA_SEG_DELAYED:
1315                         seg->status = WA_SEG_ABORTED;
1316                         spin_lock_irqsave(&rpipe->seg_lock, flags2);
1317                         list_del(&seg->list_node);
1318                         xfer->segs_done++;
1319                         rpipe_ready = rpipe_avail_inc(rpipe);
1320                         spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1321                         break;
1322                 case WA_SEG_SUBMITTED:
1323                         seg->status = WA_SEG_ABORTED;
1324                         usb_unlink_urb(&seg->tr_urb);
1325                         if (xfer->is_inbound == 0)
1326                                 usb_unlink_urb(seg->dto_urb);
1327                         xfer->segs_done++;
1328                         rpipe_ready = rpipe_avail_inc(rpipe);
1329                         break;
1330                 case WA_SEG_PENDING:
1331                         seg->status = WA_SEG_ABORTED;
1332                         xfer->segs_done++;
1333                         rpipe_ready = rpipe_avail_inc(rpipe);
1334                         break;
1335                 case WA_SEG_DTI_PENDING:
1336                         usb_unlink_urb(wa->dti_urb);
1337                         seg->status = WA_SEG_ABORTED;
1338                         xfer->segs_done++;
1339                         rpipe_ready = rpipe_avail_inc(rpipe);
1340                         break;
1341                 case WA_SEG_DONE:
1342                 case WA_SEG_ERROR:
1343                 case WA_SEG_ABORTED:
1344                         break;
1345                 }
1346         }
1347         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1348         __wa_xfer_is_done(xfer);
1349         spin_unlock_irqrestore(&xfer->lock, flags);
1350         wa_xfer_completion(xfer);
1351         if (rpipe_ready)
1352                 wa_xfer_delayed_run(rpipe);
1353         return 0;
1354
1355 out_unlock:
1356         spin_unlock_irqrestore(&xfer->lock, flags);
1357 out:
1358         return 0;
1359
1360 dequeue_delayed:
1361         list_del_init(&xfer->list_node);
1362         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1363         xfer->result = urb->status;
1364         spin_unlock_irqrestore(&xfer->lock, flags);
1365         wa_xfer_giveback(xfer);
1366         usb_put_urb(urb);               /* we got a ref in enqueue() */
1367         return 0;
1368 }
1369 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
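
/*
 * Illustrative sketch (not part of this driver): wa_urb_dequeue() is
 * the backend for a host controller driver's ->urb_dequeue() op.  A
 * wrapper in the HCD glue would look roughly like the hypothetical
 * example below, where example_hcd_to_wahc() stands in for whatever
 * host-specific lookup maps the usb_hcd to its wahc:
 *
 *        static int example_op_urb_dequeue(struct usb_hcd *hcd,
 *                                          struct urb *urb, int status)
 *        {
 *                struct wahc *wa = example_hcd_to_wahc(hcd);
 *
 *                return wa_urb_dequeue(wa, urb);
 *        }
 */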
1370
1371 /*
1372  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1373  * codes
1374  *
1375  * Positive errno values are internal inconsistencies and should be
1376  * flagged louder. Negative values are passed up to the user in the
1377  * normal way.
1378  *
1379  * @status: USB WA status code -- high two bits are stripped.
1380  */
1381 static int wa_xfer_status_to_errno(u8 status)
1382 {
1383         int errno;
1384         u8 real_status = status;
1385         static int xlat[] = {
1386                 [WA_XFER_STATUS_SUCCESS] =              0,
1387                 [WA_XFER_STATUS_HALTED] =               -EPIPE,
1388                 [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1389                 [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1390                 [WA_XFER_RESERVED] =                    EINVAL,
1391                 [WA_XFER_STATUS_NOT_FOUND] =            0,
1392                 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1393                 [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1394                 [WA_XFER_STATUS_ABORTED] =              -EINTR,
1395                 [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1396                 [WA_XFER_INVALID_FORMAT] =              EINVAL,
1397                 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1398                 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1399         };
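        /* strip the error/warning flags (high two bits), keep the code */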
1400         status &= 0x3f;
1401
1402         if (status == 0)
1403                 return 0;
1404         if (status >= ARRAY_SIZE(xlat)) {
1405                 printk_ratelimited(KERN_ERR "%s(): BUG? "
1406                                "Unknown WA transfer status 0x%02x\n",
1407                                __func__, real_status);
1408                 return -EINVAL;
1409         }
1410         errno = xlat[status];
1411         if (unlikely(errno > 0)) {
1412                 printk_ratelimited(KERN_ERR "%s(): BUG? "
1413                                "Inconsistent WA status: 0x%02x\n",
1414                                __func__, real_status);
1415                 errno = -errno;
1416         }
1417         return errno;
1418 }
1419
1420 /*
1421  * Process a xfer result completion message
1422  *
1423  * inbound transfers: need to schedule a DTI read
1424  *
1425  * FIXME: this function needs to be broken up into parts
1426  */
1427 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
1428                 struct wa_xfer_result *xfer_result)
1429 {
1430         int result;
1431         struct device *dev = &wa->usb_iface->dev;
1432         unsigned long flags;
1433         u8 seg_idx;
1434         struct wa_seg *seg;
1435         struct wa_rpipe *rpipe;
1436         unsigned done = 0;
1437         u8 usb_status;
1438         unsigned rpipe_ready = 0;
1439
1440         spin_lock_irqsave(&xfer->lock, flags);
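        /* the low 7 bits of bTransferSegment carry the segment index */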
1441         seg_idx = xfer_result->bTransferSegment & 0x7f;
1442         if (unlikely(seg_idx >= xfer->segs))
1443                 goto error_bad_seg;
1444         seg = xfer->seg[seg_idx];
1445         rpipe = xfer->ep->hcpriv;
1446         usb_status = xfer_result->bTransferStatus;
1447         dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
1448                 xfer, seg_idx, usb_status, seg->status);
1449         if (seg->status == WA_SEG_ABORTED
1450             || seg->status == WA_SEG_ERROR)     /* already handled */
1451                 goto segment_aborted;
1452         if (seg->status == WA_SEG_SUBMITTED)    /* result arrived before */
1453                 seg->status = WA_SEG_PENDING;   /* wa_seg{_dto}_cb() ran */
1454         if (seg->status != WA_SEG_PENDING) {
1455                 if (printk_ratelimit())
1456                         dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1457                                 xfer, seg_idx, seg->status);
1458                 seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
1459         }
1460         if (usb_status & 0x80) {
1461                 seg->result = wa_xfer_status_to_errno(usb_status);
1462                 dev_err(dev, "DTI: xfer %p ID 0x%08X#%u failed (0x%02x)\n",
1463                         xfer, xfer->id, seg->index, usb_status);
1464                 goto error_complete;
1465         }
1466         /* FIXME: we ignore warnings; should tally them for stats */
1467         if (usb_status & 0x40)          /* Warning?... */
1468                 usb_status = 0;         /* ... pass */
1469         if (xfer->is_inbound) { /* IN data phase: read to buffer */
1470                 seg->status = WA_SEG_DTI_PENDING;
1471                 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1472                 /* this should always be 0 before a resubmit. */
1473                 wa->buf_in_urb->num_mapped_sgs  = 0;
1474
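                /*
                 * Point the buf-in URB at this segment's slice of the
                 * caller's buffer: a DMA address for DMA transfers, a
                 * linear buffer if one was provided, or a freshly built
                 * subset SG list otherwise.
                 */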
1475                 if (xfer->is_dma) {
1476                         wa->buf_in_urb->transfer_dma =
1477                                 xfer->urb->transfer_dma
1478                                 + (seg_idx * xfer->seg_size);
1479                         wa->buf_in_urb->transfer_flags
1480                                 |= URB_NO_TRANSFER_DMA_MAP;
1481                         wa->buf_in_urb->transfer_buffer = NULL;
1482                         wa->buf_in_urb->sg = NULL;
1483                         wa->buf_in_urb->num_sgs = 0;
1484                 } else {
1485                         /* do buffer or SG processing. */
1486                         wa->buf_in_urb->transfer_flags
1487                                 &= ~URB_NO_TRANSFER_DMA_MAP;
1488
1489                         if (xfer->urb->transfer_buffer) {
1490                                 wa->buf_in_urb->transfer_buffer =
1491                                         xfer->urb->transfer_buffer
1492                                         + (seg_idx * xfer->seg_size);
1493                                 wa->buf_in_urb->sg = NULL;
1494                                 wa->buf_in_urb->num_sgs = 0;
1495                         } else {
1496                                 /* allocate an SG list to store seg_size
1497                                  * bytes and copy the subset of the
1498                                  * xfer->urb->sg that matches the buffer
1499                                  * subset we are about to read. */
1500                                 wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
1501                                         xfer->urb->sg,
1502                                         seg_idx * xfer->seg_size,
1503                                         le32_to_cpu(
1504                                                 xfer_result->dwTransferLength),
1505                                         &(wa->buf_in_urb->num_sgs));
1506
1507                                 if (!(wa->buf_in_urb->sg)) {
1508                                         wa->buf_in_urb->num_sgs = 0;
1509                                         goto error_sg_alloc;
1510                                 }
1511                                 wa->buf_in_urb->transfer_buffer = NULL;
1512                         }
1513                 }
1514                 wa->buf_in_urb->transfer_buffer_length =
1515                         le32_to_cpu(xfer_result->dwTransferLength);
1516                 wa->buf_in_urb->context = seg;
1517                 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1518                 if (result < 0)
1519                         goto error_submit_buf_in;
1520         } else {
1521                 /* OUT data phase: nothing to read, just complete it */
1522                 seg->status = WA_SEG_DONE;
1523                 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1524                 xfer->segs_done++;
1525                 rpipe_ready = rpipe_avail_inc(rpipe);
1526                 done = __wa_xfer_is_done(xfer);
1527         }
1528         spin_unlock_irqrestore(&xfer->lock, flags);
1529         if (done)
1530                 wa_xfer_completion(xfer);
1531         if (rpipe_ready)
1532                 wa_xfer_delayed_run(rpipe);
1533         return;
1534
1535 error_submit_buf_in:
1536         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1537                 dev_err(dev, "DTI: URB max acceptable errors "
1538                         "exceeded, resetting device\n");
1539                 wa_reset_all(wa);
1540         }
1541         if (printk_ratelimit())
1542                 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1543                         xfer, seg_idx, result);
1544         seg->result = result;
1545         kfree(wa->buf_in_urb->sg);
1546 error_sg_alloc:
1547         __wa_xfer_abort(xfer);
1548 error_complete:
1549         seg->status = WA_SEG_ERROR;
1550         xfer->segs_done++;
1551         rpipe_ready = rpipe_avail_inc(rpipe);
1552         done = __wa_xfer_is_done(xfer);
1553         /*
1554          * queue work item to clear STALL for control endpoints.
1555          * Otherwise, let endpoint_reset take care of it.
1556          */
1557         if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
1558                 usb_endpoint_xfer_control(&xfer->ep->desc) &&
1559                 done) {
1560
1561                 dev_info(dev, "Control EP stall.  Queue delayed work.\n");
1562                 spin_lock_irq(&wa->xfer_list_lock);
1563                 /* move xfer from xfer_list to xfer_errored_list. */
1564                 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
1565                 spin_unlock_irq(&wa->xfer_list_lock);
1566                 spin_unlock_irqrestore(&xfer->lock, flags);
1567                 queue_work(wusbd, &wa->xfer_error_work);
1568         } else {
1569                 spin_unlock_irqrestore(&xfer->lock, flags);
1570                 if (done)
1571                         wa_xfer_completion(xfer);
1572                 if (rpipe_ready)
1573                         wa_xfer_delayed_run(rpipe);
1574         }
1575
1576         return;
1577
1578 error_bad_seg:
1579         spin_unlock_irqrestore(&xfer->lock, flags);
1580         wa_urb_dequeue(wa, xfer->urb);
1581         if (printk_ratelimit())
1582                 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1583         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1584                 dev_err(dev, "DTI: URB max acceptable errors "
1585                         "exceeded, resetting device\n");
1586                 wa_reset_all(wa);
1587         }
1588         return;
1589
1590 segment_aborted:
1591         /* nothing to do, as the aborter did the completion */
1592         spin_unlock_irqrestore(&xfer->lock, flags);
1593 }
1594
1595 /*
1596  * Callback for the IN data phase
1597  *
1598  * If successful, transition state; otherwise, take note of the
1599  * error, mark this segment done and try completion.
1600  *
1601  * Note we don't touch the xfer until we are sure the transfer hasn't
1602  * been cancelled (ECONNRESET, ENOENT); if it has been, seg->xfer may
1603  * already be gone.
1604  */
1605 static void wa_buf_in_cb(struct urb *urb)
1606 {
1607         struct wa_seg *seg = urb->context;
1608         struct wa_xfer *xfer = seg->xfer;
1609         struct wahc *wa;
1610         struct device *dev;
1611         struct wa_rpipe *rpipe;
1612         unsigned rpipe_ready;
1613         unsigned long flags;
1614         u8 done = 0;
1615
1616         /* free the sg if it was used. */
1617         kfree(urb->sg);
1618         urb->sg = NULL;
1619
1620         switch (urb->status) {
1621         case 0:
1622                 spin_lock_irqsave(&xfer->lock, flags);
1623                 wa = xfer->wa;
1624                 dev = &wa->usb_iface->dev;
1625                 rpipe = xfer->ep->hcpriv;
1626                 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1627                         xfer, seg->index, (size_t)urb->actual_length);
1628                 seg->status = WA_SEG_DONE;
1629                 seg->result = urb->actual_length;
1630                 xfer->segs_done++;
1631                 rpipe_ready = rpipe_avail_inc(rpipe);
1632                 done = __wa_xfer_is_done(xfer);
1633                 spin_unlock_irqrestore(&xfer->lock, flags);
1634                 if (done)
1635                         wa_xfer_completion(xfer);
1636                 if (rpipe_ready)
1637                         wa_xfer_delayed_run(rpipe);
1638                 break;
1639         case -ECONNRESET:       /* URB unlinked; no need to do anything */
1640         case -ENOENT:           /* as it was done by whoever unlinked us */
1641                 break;
1642         default:                /* Other errors ... */
1643                 spin_lock_irqsave(&xfer->lock, flags);
1644                 wa = xfer->wa;
1645                 dev = &wa->usb_iface->dev;
1646                 rpipe = xfer->ep->hcpriv;
1647                 if (printk_ratelimit())
1648                         dev_err(dev, "xfer %p#%u: data in error %d\n",
1649                                 xfer, seg->index, urb->status);
1650                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1651                             EDC_ERROR_TIMEFRAME)) {
1652                         dev_err(dev, "DTI: URB max acceptable errors "
1653                                 "exceeded, resetting device\n");
1654                         wa_reset_all(wa);
1655                 }
1656                 seg->status = WA_SEG_ERROR;
1657                 seg->result = urb->status;
1658                 xfer->segs_done++;
1659                 rpipe_ready = rpipe_avail_inc(rpipe);
1660                 __wa_xfer_abort(xfer);
1661                 done = __wa_xfer_is_done(xfer);
1662                 spin_unlock_irqrestore(&xfer->lock, flags);
1663                 if (done)
1664                         wa_xfer_completion(xfer);
1665                 if (rpipe_ready)
1666                         wa_xfer_delayed_run(rpipe);
1667         }
1668 }
1669
1670 /*
1671  * Handle an incoming transfer result buffer
1672  *
1673  * Given a transfer result buffer, it completes the transfer (possibly
1674  * scheduling a buffer-in read) and then resubmits the DTI URB for a
1675  * new transfer result read.
1676  *
1677  *
1678  * The xfer_result DTI URB state machine
1679  *
1680  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1681  *
1682  * We start in OFF mode, the first xfer_result notification [through
1683  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1684  * read.
1685  *
1686  * We receive a buffer -- if it is not a xfer_result, we complain and
1687  * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1688  * request accounting. If it is an IN segment, we move to RBI and post
1689  * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1690  * repost the DTI-URB and move to RXR state. If there was no IN
1691  * segment, it will repost the DTI-URB.
1692  *
1693  * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1694  * errors) in the URBs.
1695  */
1696 static void wa_dti_cb(struct urb *urb)
1697 {
1698         int result;
1699         struct wahc *wa = urb->context;
1700         struct device *dev = &wa->usb_iface->dev;
1701         struct wa_xfer_result *xfer_result;
1702         u32 xfer_id;
1703         struct wa_xfer *xfer;
1704         u8 usb_status;
1705
1706         BUG_ON(wa->dti_urb != urb);
1707         switch (wa->dti_urb->status) {
1708         case 0:
1709                 /* We have a xfer result buffer; check it */
1710                 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1711                         urb->actual_length, urb->transfer_buffer);
1712                 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1713                         dev_err(dev, "DTI Error: xfer result--bad size "
1714                                 "(%d bytes vs %zu needed)\n",
1715                                 urb->actual_length, sizeof(*xfer_result));
1716                         break;
1717                 }
1718                 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
1719                 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1720                         dev_err(dev, "DTI Error: xfer result--"
1721                                 "bad header length %u\n",
1722                                 xfer_result->hdr.bLength);
1723                         break;
1724                 }
1725                 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1726                         dev_err(dev, "DTI Error: xfer result--"
1727                                 "bad header type 0x%02x\n",
1728                                 xfer_result->hdr.bNotifyType);
1729                         break;
1730                 }
1731                 usb_status = xfer_result->bTransferStatus & 0x3f;
1732                 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1733                         /* taken care of already */
1734                         break;
1735                 xfer_id = xfer_result->dwTransferID;
1736                 xfer = wa_xfer_get_by_id(wa, xfer_id);
1737                 if (xfer == NULL) {
1738                         /* FIXME: transaction might have been cancelled */
1739                         dev_err(dev, "DTI Error: xfer result--"
1740                                 "unknown xfer 0x%08x (status 0x%02x)\n",
1741                                 xfer_id, usb_status);
1742                         break;
1743                 }
1744                 wa_xfer_result_chew(wa, xfer, xfer_result);
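                /* drop the reference taken by wa_xfer_get_by_id() */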
1745                 wa_xfer_put(xfer);
1746                 break;
1747         case -ENOENT:           /* (we killed the URB)...so, no broadcast */
1748         case -ESHUTDOWN:        /* going away! */
1749                 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1750                 goto out;
1751         default:
1752                 /* Unknown error */
1753                 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1754                             EDC_ERROR_TIMEFRAME)) {
1755                         dev_err(dev, "DTI: URB max acceptable errors "
1756                                 "exceeded, resetting device\n");
1757                         wa_reset_all(wa);
1758                         goto out;
1759                 }
1760                 if (printk_ratelimit())
1761                         dev_err(dev, "DTI: URB error %d\n", urb->status);
1762                 break;
1763         }
1764         /* Resubmit the DTI URB */
1765         result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1766         if (result < 0) {
1767                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1768                         "resetting\n", result);
1769                 wa_reset_all(wa);
1770         }
1771 out:
1772         return;
1773 }
1774
1775 /*
1776  * Transfer complete notification
1777  *
1778  * Called from the notif.c code. We get a notification on EP2 saying
1779  * that some endpoint has some transfer result data available. We are
1780  * about to read it.
1781  *
1782  * To speed things up, we always keep a URB posted reading the DTI
1783  * endpoint; we don't really set it up and start it until the first
1784  * xfer complete notification arrives, which is what we do here.
1785  *
1786  * Follow up in wa_dti_cb(), as that's where the whole state
1787  * machine starts.
1788  *
1789  * So here we just initialize the DTI URB for reading transfer result
1790  * notifications and also the buffer-in URB, for reading buffers. Then
1791  * we just submit the DTI URB.
1792  *
1793  * @wa shall be referenced
1794  */
1795 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1796 {
1797         int result;
1798         struct device *dev = &wa->usb_iface->dev;
1799         struct wa_notif_xfer *notif_xfer;
1800         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1801
1802         notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1803         BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1804
1805         if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1806                 /* FIXME: hardcoded limitation, adapt */
1807                 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1808                         notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1809                 goto error;
1810         }
1811         if (wa->dti_urb != NULL)        /* DTI URB already started */
1812                 goto out;
1813
1814         wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1815         if (wa->dti_urb == NULL) {
1816                 dev_err(dev, "Can't allocate DTI URB\n");
1817                 goto error_dti_urb_alloc;
1818         }
1819         usb_fill_bulk_urb(
1820                 wa->dti_urb, wa->usb_dev,
1821                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1822                 wa->dti_buf, wa->dti_buf_size,
1823                 wa_dti_cb, wa);
1824
1825         wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1826         if (wa->buf_in_urb == NULL) {
1827                 dev_err(dev, "Can't allocate BUF-IN URB\n");
1828                 goto error_buf_in_urb_alloc;
1829         }
1830         usb_fill_bulk_urb(
1831                 wa->buf_in_urb, wa->usb_dev,
1832                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1833                 NULL, 0, wa_buf_in_cb, wa);
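        /*
         * The buf-in URB is deliberately filled with a NULL buffer; the
         * actual buffer (or SG list), length and context are set up per
         * segment by wa_xfer_result_chew() before each submission.
         */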
1834         result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1835         if (result < 0) {
1836                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1837                         "resetting\n", result);
1838                 goto error_dti_urb_submit;
1839         }
1840 out:
1841         return;
1842
1843 error_dti_urb_submit:
1844         usb_put_urb(wa->buf_in_urb);
1845 error_buf_in_urb_alloc:
1846         usb_put_urb(wa->dti_urb);
1847         wa->dti_urb = NULL;
1848 error_dti_urb_alloc:
1849 error:
1850         wa_reset_all(wa);
1851 }