usb: wusbcore: set pointers to NULL after freeing in error cases
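The recurring idiom: when an error path frees an object that is still
reachable through a longer-lived structure, the pointer is set to NULL so
the normal teardown path (wa_xfer_destroy() and the resubmit paths) sees
NULL and skips it instead of double-freeing.  Both kfree() and
usb_free_urb() treat NULL as a no-op, which is what makes the idiom safe.
A minimal sketch of the pattern (illustrative, not lifted from any one
hunk):

        kfree(seg->dto_urb->sg);        /* kfree(NULL) is a no-op */
        seg->dto_urb->sg = NULL;        /* later teardown now skips it */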
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index d3493ca..0b27146 100644
@@ -114,8 +114,8 @@ static void wa_xfer_delayed_run(struct wa_rpipe *);
  * struct).
  */
 struct wa_seg {
-       struct urb urb;
-       struct urb *dto_urb;            /* for data output? */
+       struct urb tr_urb;              /* transfer request urb. */
+       struct urb *dto_urb;            /* for data output. */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* out xfer */
        u8 index;                       /* which segment we are */
@@ -125,10 +125,13 @@ struct wa_seg {
        u8 xfer_extra[];                /* xtra space for xfer_hdr_ctl */
 };
 
-static void wa_seg_init(struct wa_seg *seg)
+static inline void wa_seg_init(struct wa_seg *seg)
 {
-       /* usb_init_urb() repeats a lot of work, so we do it here */
-       kref_init(&seg->urb.kref);
+       usb_init_urb(&seg->tr_urb);
+
+       /* set the remaining memory to 0. */
+       memset(((void *)seg) + sizeof(seg->tr_urb), 0,
+               sizeof(*seg) - sizeof(seg->tr_urb));
 }
 
 /*
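wa_seg_init() now lets usb_init_urb() do full URB setup and then zeroes
only the bytes that follow the embedded URB.  That memset() arithmetic is
correct only because tr_urb is the first member of struct wa_seg, so the
zeroed region covers exactly the remaining fields.  A compile-time guard
along these lines (not part of the patch) would pin down the assumption:

        BUILD_BUG_ON(offsetof(struct wa_seg, tr_urb) != 0);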
@@ -166,8 +169,8 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)
 /*
  * Destroy a transfer structure
  *
- * Note that the xfer->seg[index] thingies follow the URB life cycle,
- * so we need to put them, not free them.
+ * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
+ * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
  */
 static void wa_xfer_destroy(struct kref *_xfer)
 {
@@ -175,10 +178,15 @@ static void wa_xfer_destroy(struct kref *_xfer)
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
-                       if (xfer->is_inbound)
-                               usb_put_urb(xfer->seg[cnt]->dto_urb);
-                       usb_put_urb(&xfer->seg[cnt]->urb);
+                       if (xfer->seg[cnt]) {
+                               if (xfer->seg[cnt]->dto_urb) {
+                                       kfree(xfer->seg[cnt]->dto_urb->sg);
+                                       usb_free_urb(xfer->seg[cnt]->dto_urb);
+                               }
+                               usb_free_urb(&xfer->seg[cnt]->tr_urb);
+                       }
                }
+               kfree(xfer->seg);
        }
        kfree(xfer);
 }
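The rewritten comment is worth unpacking: usb_free_urb() drops the URB's
last reference, and the USB core's release function kfree()s the URB
pointer itself.  Because tr_urb sits at offset 0 of struct wa_seg, that
pointer is the very address kmalloc() returned for the segment in
__wa_xfer_setup_segs(), so freeing the embedded URB frees the whole
segment.  Roughly what the core does on the final put (paraphrased from
drivers/usb/core/urb.c):

        static void urb_destroy(struct kref *kref)
        {
                struct urb *urb = to_urb(kref);

                if (urb->transfer_flags & URB_FREE_BUFFER)
                        kfree(urb->transfer_buffer);
                kfree(urb);     /* same address as the containing wa_seg */
        }

This is also why the new NULL checks matter: segments that were never
allocated, or whose dto_urb setup failed, are left as NULL by the error
paths below and are harmlessly skipped here.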
@@ -492,12 +500,12 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 /*
  * Callback for the OUT data phase of the segment request
  *
- * Check wa_seg_cb(); most comments also apply here because this
+ * Check wa_seg_tr_cb(); most comments also apply here because this
  * function does almost the same thing and they work closely
  * together.
  *
  * If the seg request has failed but this DTO phase has succeeded,
- * wa_seg_cb() has already failed the segment and moved the
+ * wa_seg_tr_cb() has already failed the segment and moved the
  * status to WA_SEG_ERROR, so this will go through 'case 0' and
  * effectively do nothing.
  */
@@ -512,6 +520,10 @@ static void wa_seg_dto_cb(struct urb *urb)
        unsigned rpipe_ready = 0;
        u8 done = 0;
 
+       /* free the sg if it was used. */
+       kfree(urb->sg);
+       urb->sg = NULL;
+
        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
@@ -574,7 +586,7 @@ static void wa_seg_dto_cb(struct urb *urb)
  * as in that case, wa_seg_dto_cb will do it when the OUT data phase
  * finishes.
  */
-static void wa_seg_cb(struct urb *urb)
+static void wa_seg_tr_cb(struct urb *urb)
 {
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
@@ -627,9 +639,11 @@ static void wa_seg_cb(struct urb *urb)
        }
 }
 
-/* allocate an SG list to store bytes_to_transfer bytes and copy the
+/*
+ * Allocate an SG list to store bytes_to_transfer bytes and copy the
  * subset of the in_sg that matches the buffer subset
- * we are about to transfer. */
+ * we are about to transfer.
+ */
 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
        const unsigned int bytes_transferred,
        const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
@@ -708,6 +722,55 @@ static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
 }
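The elided body walks in_sg to the entry containing byte offset
bytes_transferred, then clones entries until bytes_to_transfer bytes are
covered.  A sketch of the first half of that walk (illustrative only;
variable names are made up, and it assumes the offset falls inside the
list):

        struct scatterlist *sg = in_sg;
        size_t skip = bytes_transferred;

        /* find the entry containing the first byte of this segment. */
        while (skip >= sg->length) {
                skip -= sg->length;
                sg = sg_next(sg);
        }
        /* 'skip' is now the byte offset into *sg where the subset begins. */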
 
 /*
+ * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
+ */
+static int __wa_populate_dto_urb(struct wa_xfer *xfer,
+       struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
+{
+       int result = 0;
+
+       if (xfer->is_dma) {
+               seg->dto_urb->transfer_dma =
+                       xfer->urb->transfer_dma + buf_itr_offset;
+               seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+               seg->dto_urb->sg = NULL;
+               seg->dto_urb->num_sgs = 0;
+       } else {
+               /* do buffer or SG processing. */
+               seg->dto_urb->transfer_flags &=
+                       ~URB_NO_TRANSFER_DMA_MAP;
+               /* this should always be 0 before a resubmit. */
+               seg->dto_urb->num_mapped_sgs = 0;
+
+               if (xfer->urb->transfer_buffer) {
+                       seg->dto_urb->transfer_buffer =
+                               xfer->urb->transfer_buffer +
+                               buf_itr_offset;
+                       seg->dto_urb->sg = NULL;
+                       seg->dto_urb->num_sgs = 0;
+               } else {
+                       seg->dto_urb->transfer_buffer = NULL;
+
+                       /*
+                        * allocate an SG list to store seg_size bytes
+                        * and copy the subset of the xfer->urb->sg that
+                        * matches the buffer subset we are about to
+                        * read.
+                        */
+                       seg->dto_urb->sg = wa_xfer_create_subset_sg(
+                               xfer->urb->sg,
+                               buf_itr_offset, buf_itr_size,
+                               &(seg->dto_urb->num_sgs));
+                       if (!(seg->dto_urb->sg))
+                               result = -ENOMEM;
+               }
+       }
+       seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+       return result;
+}
+
+/*
  * Allocate the segs array and initialize each of them
  *
  * The segments are freed by wa_xfer_destroy() when the xfer use count
@@ -732,17 +795,17 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
-               seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
+               seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
                if (seg == NULL)
-                       goto error_seg_kzalloc;
+                       goto error_seg_kmalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
-               usb_fill_bulk_urb(&seg->urb, usb_dev,
+               usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
-                                 wa_seg_cb, seg);
+                                 wa_seg_tr_cb, seg);
                buf_itr_size = min(buf_size, xfer->seg_size);
                if (xfer->is_inbound == 0 && buf_size > 0) {
                        /* outbound data. */
@@ -754,48 +817,13 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);
-                       if (xfer->is_dma) {
-                               seg->dto_urb->transfer_dma =
-                                       xfer->urb->transfer_dma + buf_itr;
-                               seg->dto_urb->transfer_flags |=
-                                       URB_NO_TRANSFER_DMA_MAP;
-                               seg->dto_urb->transfer_buffer = NULL;
-                               seg->dto_urb->sg = NULL;
-                               seg->dto_urb->num_sgs = 0;
-                       } else {
-                               /* do buffer or SG processing. */
-                               seg->dto_urb->transfer_flags &=
-                                       ~URB_NO_TRANSFER_DMA_MAP;
-                               /* this should always be 0 before a resubmit. */
-                               seg->dto_urb->num_mapped_sgs = 0;
-
-                               if (xfer->urb->transfer_buffer) {
-                                       seg->dto_urb->transfer_buffer =
-                                               xfer->urb->transfer_buffer +
-                                               buf_itr;
-                                       seg->dto_urb->sg = NULL;
-                                       seg->dto_urb->num_sgs = 0;
-                               } else {
-                                       /* allocate an SG list to store seg_size
-                                           bytes and copy the subset of the
-                                           xfer->urb->sg that matches the
-                                           buffer subset we are about to read.
-                                       */
-                                       seg->dto_urb->sg =
-                                               wa_xfer_create_subset_sg(
-                                               xfer->urb->sg,
-                                               buf_itr, buf_itr_size,
-                                               &(seg->dto_urb->num_sgs));
-
-                                       if (!(seg->dto_urb->sg)) {
-                                               seg->dto_urb->num_sgs   = 0;
-                                               goto error_sg_alloc;
-                                       }
-
-                                       seg->dto_urb->transfer_buffer = NULL;
-                               }
-                       }
-                       seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+                       /* fill in the xfer buffer information. */
+                       result = __wa_populate_dto_urb(xfer, seg,
+                                               buf_itr, buf_itr_size);
+
+                       if (result < 0)
+                               goto error_seg_outbound_populate;
                }
                seg->status = WA_SEG_READY;
                buf_itr += buf_itr_size;
@@ -803,18 +831,17 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
        }
        return 0;
 
-error_sg_alloc:
-       kfree(seg->dto_urb);
+       /*
+        * Free the memory for the current segment which failed to init.
+        * Use the fact that cnt is left at where it failed.  The remaining
+        * segments will be cleaned up by wa_xfer_destroy.
+        */
+error_seg_outbound_populate:
+       usb_free_urb(xfer->seg[cnt]->dto_urb);
 error_dto_alloc:
        kfree(xfer->seg[cnt]);
-       cnt--;
-error_seg_kzalloc:
-       /* use the fact that cnt is left at were it failed */
-       for (; cnt >= 0; cnt--) {
-               if (xfer->seg[cnt] && xfer->is_inbound == 0)
-                       usb_free_urb(xfer->seg[cnt]->dto_urb);
-               kfree(xfer->seg[cnt]);
-       }
+       xfer->seg[cnt] = NULL;
+error_seg_kmalloc:
 error_segs_kzalloc:
        return result;
 }
@@ -884,12 +911,14 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg)
 {
        int result;
-       result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
+       /* submit the transfer request. */
+       result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
        if (result < 0) {
                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
                       xfer, seg->index, result);
                goto error_seg_submit;
        }
+       /* submit the out data if this is an out request. */
        if (seg->dto_urb) {
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
@@ -903,7 +932,7 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
        return 0;
 
 error_dto_submit:
-       usb_unlink_urb(&seg->urb);
+       usb_unlink_urb(&seg->tr_urb);
 error_seg_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
@@ -928,7 +957,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
              && !list_empty(&rpipe->seg_list)) {
-               seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+               seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
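The list_first_entry() change is purely cosmetic: include/linux/list.h
defines it in terms of list_entry() on ->next, so behavior is identical
and only the intent is spelled out:

        #define list_first_entry(ptr, type, member) \
                list_entry((ptr)->next, type, member)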
@@ -1093,34 +1122,82 @@ error_xfer_submit:
  *
  * We need to be careful here, as dequeue() could be called in the
  * middle.  That's why we do the whole thing under the
- * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
  * and then checks the list -- so as we would be acquiring in inverse
- * order, we just drop the lock once we have the xfer and reacquire it
- * later.
+ * order, we move the delayed list to a separate list while locked and then
+ * submit them without the list lock held.
  */
 void wa_urb_enqueue_run(struct work_struct *ws)
 {
-       struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+       struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;
+       LIST_HEAD(tmp_list);
 
+       /* Move the wa->xfer_delayed_list onto a temp list while locked. */
        spin_lock_irq(&wa->xfer_list_lock);
-       list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
-                                list_node) {
+       list_cut_position(&tmp_list, &wa->xfer_delayed_list,
+                       wa->xfer_delayed_list.prev);
+       spin_unlock_irq(&wa->xfer_list_lock);
+
+       /*
+        * enqueue from temp list without list lock held since wa_urb_enqueue_b
+        * can take xfer->lock as well as lock mutexes.
+        */
+       list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
                list_del_init(&xfer->list_node);
-               spin_unlock_irq(&wa->xfer_list_lock);
 
                urb = xfer->urb;
                wa_urb_enqueue_b(xfer);
                usb_put_urb(urb);       /* taken when queuing */
-
-               spin_lock_irq(&wa->xfer_list_lock);
        }
-       spin_unlock_irq(&wa->xfer_list_lock);
 }
 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
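Passing wa->xfer_delayed_list.prev as the cut point makes
list_cut_position() move the entire delayed list onto the stack-local
tmp_list in O(1) while the lock is held; the entries are then processed
with no list lock, so wa_urb_enqueue_b() may sleep and take xfer->lock
without inverting lock order against dequeue().  The same pattern is more
often spelled with list_splice_init(); a generic sketch with made-up
names:

        LIST_HEAD(tmp);

        spin_lock_irq(&dev->list_lock);
        list_splice_init(&dev->pending, &tmp);  /* O(1); leaves pending empty */
        spin_unlock_irq(&dev->list_lock);

        list_for_each_entry_safe(item, next, &tmp, node)
                process(item);                  /* may sleep; no lock held */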
 
 /*
+ * Process the errored transfers on the Wire Adapter outside of
+ * interrupt context.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+       struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+       struct wa_xfer *xfer, *next;
+       LIST_HEAD(tmp_list);
+
+       pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+       /* Move the wa->xfer_errored_list onto a temp list while locked. */
+       spin_lock_irq(&wa->xfer_list_lock);
+       list_cut_position(&tmp_list, &wa->xfer_errored_list,
+                       wa->xfer_errored_list.prev);
+       spin_unlock_irq(&wa->xfer_list_lock);
+
+       /*
+        * run rpipe_clear_feature_stalled from temp list without list lock
+        * held.
+        */
+       list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+               struct usb_host_endpoint *ep;
+               unsigned long flags;
+               struct wa_rpipe *rpipe;
+
+               spin_lock_irqsave(&xfer->lock, flags);
+               ep = xfer->ep;
+               rpipe = ep->hcpriv;
+               spin_unlock_irqrestore(&xfer->lock, flags);
+
+               /* clear RPIPE feature stalled without holding a lock. */
+               rpipe_clear_feature_stalled(wa, ep);
+
+               /* complete the xfer. This removes it from the tmp list. */
+               wa_xfer_completion(xfer);
+
+               /* check for work. */
+               wa_xfer_delayed_run(rpipe);
+       }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
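The new work item and list need companion initialization that is outside
this file's diff, presumably next to the existing INIT_WORK() calls in
wa_init() in wa-hc.h.  Roughly (field names taken from the code above;
the placement is an assumption):

        INIT_LIST_HEAD(&wa->xfer_errored_list);
        INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
        INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);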
+
+/*
  * Submit a transfer to the Wire Adapter in a delayed way
  *
  * The process of enqueuing involves possible sleeps() [see
@@ -1175,7 +1252,7 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
-               queue_work(wusbd, &wa->xfer_work);
+               queue_work(wusbd, &wa->xfer_enqueue_work);
        } else {
                wa_urb_enqueue_b(xfer);
        }
@@ -1217,7 +1294,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
 
        xfer = urb->hcpriv;
        if (xfer == NULL) {
-               /* NOthing setup yet enqueue will see urb->status !=
+               /*
+                * Nothing setup yet; enqueue will see urb->status !=
                 * -EINPROGRESS (by hcd layer) and bail out with
                 * error, no need to do completion
                 */
@@ -1260,7 +1338,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
                        break;
                case WA_SEG_SUBMITTED:
                        seg->status = WA_SEG_ABORTED;
-                       usb_unlink_urb(&seg->urb);
+                       usb_unlink_urb(&seg->tr_urb);
                        if (xfer->is_inbound == 0)
                                usb_unlink_urb(seg->dto_urb);
                        xfer->segs_done++;
@@ -1361,9 +1439,10 @@ static int wa_xfer_status_to_errno(u8 status)
  *
  * inbound transfers: need to schedule a DTI read
  *
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up into parts
  */
-static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
+               struct wa_xfer_result *xfer_result)
 {
        int result;
        struct device *dev = &wa->usb_iface->dev;
@@ -1371,8 +1450,7 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
        u8 seg_idx;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
-       struct wa_xfer_result *xfer_result = wa->xfer_result;
-       u8 done = 0;
+       unsigned done = 0;
        u8 usb_status;
        unsigned rpipe_ready = 0;
 
@@ -1482,18 +1560,37 @@ error_submit_buf_in:
                        xfer, seg_idx, result);
        seg->result = result;
        kfree(wa->buf_in_urb->sg);
+       wa->buf_in_urb->sg = NULL;
 error_sg_alloc:
+       __wa_xfer_abort(xfer);
 error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
-       __wa_xfer_abort(xfer);
        done = __wa_xfer_is_done(xfer);
-       spin_unlock_irqrestore(&xfer->lock, flags);
-       if (done)
-               wa_xfer_completion(xfer);
-       if (rpipe_ready)
-               wa_xfer_delayed_run(rpipe);
+       /*
+        * queue work item to clear STALL for control endpoints.
+        * Otherwise, let endpoint_reset take care of it.
+        */
+       if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+               usb_endpoint_xfer_control(&xfer->ep->desc) &&
+               done) {
+
+               dev_info(dev, "Control EP stall.  Queue delayed work.\n");
+               spin_lock_irq(&wa->xfer_list_lock);
+               /* move xfer from xfer_list to xfer_errored_list. */
+               list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
+               spin_unlock_irq(&wa->xfer_list_lock);
+               spin_unlock_irqrestore(&xfer->lock, flags);
+               queue_work(wusbd, &wa->xfer_error_work);
+       } else {
+               spin_unlock_irqrestore(&xfer->lock, flags);
+               if (done)
+                       wa_xfer_completion(xfer);
+               if (rpipe_ready)
+                       wa_xfer_delayed_run(rpipe);
+       }
+
        return;
 
 error_bad_seg:
@@ -1614,7 +1711,7 @@ static void wa_buf_in_cb(struct urb *urb)
  * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
  * errors) in the URBs.
  */
-static void wa_xfer_result_cb(struct urb *urb)
+static void wa_dti_cb(struct urb *urb)
 {
        int result;
        struct wahc *wa = urb->context;
@@ -1636,7 +1733,7 @@ static void wa_xfer_result_cb(struct urb *urb)
                                urb->actual_length, sizeof(*xfer_result));
                        break;
                }
-               xfer_result = wa->xfer_result;
+               xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header length %u\n",
@@ -1662,7 +1759,7 @@ static void wa_xfer_result_cb(struct urb *urb)
                                xfer_id, usb_status);
                        break;
                }
-               wa_xfer_result_chew(wa, xfer);
+               wa_xfer_result_chew(wa, xfer, xfer_result);
                wa_xfer_put(xfer);
                break;
        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
@@ -1704,7 +1801,7 @@ out:
  * don't really set it up and start it until the first xfer complete
  * notification arrives, which is what we do here.
  *
- * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * Follow up in wa_dti_cb(), as that's where the whole state
  * machine starts.
  *
  * So here we just initialize the DTI URB for reading transfer result
@@ -1740,8 +1837,8 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
        usb_fill_bulk_urb(
                wa->dti_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
-               wa->xfer_result, wa->xfer_result_size,
-               wa_xfer_result_cb, wa);
+               wa->dti_buf, wa->dti_buf_size,
+               wa_dti_cb, wa);
 
        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->buf_in_urb == NULL) {
@@ -1763,6 +1860,7 @@ out:
 
 error_dti_urb_submit:
        usb_put_urb(wa->buf_in_urb);
+       wa->buf_in_urb = NULL;
 error_buf_in_urb_alloc:
        usb_put_urb(wa->dti_urb);
        wa->dti_urb = NULL;