USB: clean up some host controller sparse warnings
drivers/usb/host/isp1362-hcd.c
1 /*
2  * ISP1362 HCD (Host Controller Driver) for USB.
3  *
4  * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5  *
6  * Derived from the SL811 HCD, rewritten for ISP116x.
7  * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8  *
9  * Portions:
10  * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11  * Copyright (C) 2004 David Brownell
12  */
13
14 /*
15  * The ISP1362 chip requires a large delay (300ns and 462ns) between
16  * accesses to the address and data register.
17  * The following timing options exist:
18  *
19  * 1. Configure your memory controller to add such delays if it can (the best)
20  * 2. Implement a platform-specific delay function, possibly
21  *    combined with configuring the memory controller; see
22  *    include/linux/usb/isp1362.h for more info.
23  * 3. Use ndelay (easiest, poorest).
24  *
25  * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26  * platform specific section of isp1362.h to select the appropriate variant.
27  *
28  * Also note that according to the Philips "ISP1362 Errata" document
29  * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
30  * is reasserted (even with #CS deasserted) within 132ns after a
31  * write cycle to any controller register. If the hardware doesn't
32  * implement the recommended fix (gating the #WR with #CS) software
33  * must ensure that no further write cycle (not necessarily to the chip!)
34  * is issued by the CPU within this interval.
35  *
36  * For PXA25x this can be ensured by using VLIO with the maximum
37  * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38  */
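/*
 * Illustrative sketch (guarded by #if 0, so it is never compiled): option 2
 * above boils down to the board code handing the driver a delay hook via its
 * platform data.  The hook name and signature below are assumptions made for
 * illustration only; the authoritative definition is in
 * include/linux/usb/isp1362.h.
 */
#if 0
static void example_isp1362_board_delay(struct device *dev, unsigned int ns)
{
	/* Simplest variant: busy-wait, which is effectively option 3.  A real
	 * board file would rather rely on memory controller timing (option 1)
	 * or a cheaper platform-specific mechanism.
	 */
	ndelay(ns);
}
#endif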
39
40 #ifdef CONFIG_USB_DEBUG
41 # define ISP1362_DEBUG
42 #else
43 # undef ISP1362_DEBUG
44 #endif
45
46 /*
47  * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48  * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49  * requests are carried out in separate frames. This will delay any SETUP
50  * packets until the start of the next frame so that this situation is
51  * unlikely to occur (and makes usbtest happy running with a PXA255 target
52  * device).
53  */
54 #undef BUGGY_PXA2XX_UDC_USBTEST
55
56 #undef PTD_TRACE
57 #undef URB_TRACE
58 #undef VERBOSE
59 #undef REGISTERS
60
61 /* This enables a test of the ISP1362 on-chip buffer memory to make sure the
62  * chip access timing is correct.
63  */
64 #undef CHIP_BUFFER_TEST
65
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/kernel.h>
69 #include <linux/delay.h>
70 #include <linux/ioport.h>
71 #include <linux/sched.h>
72 #include <linux/slab.h>
73 #include <linux/smp_lock.h>
74 #include <linux/errno.h>
75 #include <linux/init.h>
76 #include <linux/list.h>
77 #include <linux/interrupt.h>
78 #include <linux/usb.h>
79 #include <linux/usb/isp1362.h>
80 #include <linux/usb/hcd.h>
81 #include <linux/platform_device.h>
82 #include <linux/pm.h>
83 #include <linux/io.h>
84 #include <linux/bitmap.h>
85
86 #include <asm/irq.h>
87 #include <asm/system.h>
88 #include <asm/byteorder.h>
89 #include <asm/unaligned.h>
90
91 static int dbg_level;
92 #ifdef ISP1362_DEBUG
93 module_param(dbg_level, int, 0644);
94 #else
95 module_param(dbg_level, int, 0);
96 #define STUB_DEBUG_FILE
97 #endif
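/*
 * Usage note (standard module_param behaviour, not specific to this driver):
 * with ISP1362_DEBUG defined, dbg_level can be changed at runtime through
 * /sys/module/isp1362_hcd/parameters/dbg_level; without it, the 0 permissions
 * create no sysfs entry, so the level can only be given as a module or boot
 * parameter.
 */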
98
99 #include "../core/usb.h"
100 #include "isp1362.h"
101
102
103 #define DRIVER_VERSION  "2005-04-04"
104 #define DRIVER_DESC     "ISP1362 USB Host Controller Driver"
105
106 MODULE_DESCRIPTION(DRIVER_DESC);
107 MODULE_LICENSE("GPL");
108
109 static const char hcd_name[] = "isp1362-hcd";
110
111 static void isp1362_hc_stop(struct usb_hcd *hcd);
112 static int isp1362_hc_start(struct usb_hcd *hcd);
113
114 /*-------------------------------------------------------------------------*/
115
116 /*
117  * When called from the interrupt handler, only isp1362_hcd->irqenb is modified,
118  * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
119  * completion.
120  * We don't need a 'disable' counterpart, since interrupts will be disabled
121  * only by the interrupt handler.
122  */
123 static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
124 {
125         if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
126                 return;
127         if (mask & ~isp1362_hcd->irqenb)
128                 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
129         isp1362_hcd->irqenb |= mask;
130         if (isp1362_hcd->irq_active)
131                 return;
132         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
133 }
134
135 /*-------------------------------------------------------------------------*/
136
137 static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
138                                                      u16 offset)
139 {
140         struct isp1362_ep_queue *epq = NULL;
141
142         if (offset < isp1362_hcd->istl_queue[1].buf_start)
143                 epq = &isp1362_hcd->istl_queue[0];
144         else if (offset < isp1362_hcd->intl_queue.buf_start)
145                 epq = &isp1362_hcd->istl_queue[1];
146         else if (offset < isp1362_hcd->atl_queue.buf_start)
147                 epq = &isp1362_hcd->intl_queue;
148         else if (offset < isp1362_hcd->atl_queue.buf_start +
149                    isp1362_hcd->atl_queue.buf_size)
150                 epq = &isp1362_hcd->atl_queue;
151
152         if (epq)
153                 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
154         else
155                 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
156
157         return epq;
158 }
159
160 static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
161 {
162         int offset;
163
164         if (index * epq->blk_size > epq->buf_size) {
165                 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
166                      epq->buf_size / epq->blk_size);
167                 return -EINVAL;
168         }
169         offset = epq->buf_start + index * epq->blk_size;
170         DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
171
172         return offset;
173 }
174
175 /*-------------------------------------------------------------------------*/
176
177 static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
178                                     int mps)
179 {
180         u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
181
182         xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
183         if (xfer_size < size && xfer_size % mps)
184                 xfer_size -= xfer_size % mps;
185
186         return xfer_size;
187 }
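/*
 * Worked example (illustrative numbers): with two blocks of blk_size 64 still
 * available and PTD_HEADER_SIZE 8, a 200 byte request on an endpoint with
 * mps 64 is first limited to 2 * 64 - 8 = 120 bytes of payload space and
 * then, since this is a partial transfer, rounded down to a multiple of mps,
 * i.e. 64 bytes, so the transfer is split on a packet boundary.
 */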
188
189 static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
190                              struct isp1362_ep *ep, u16 len)
191 {
192         int ptd_offset = -EINVAL;
193         int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
194         int found;
195
196         BUG_ON(len > epq->buf_size);
197
198         if (!epq->buf_avail)
199                 return -ENOMEM;
200
201         if (ep->num_ptds)
202                 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
203                     epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
204         BUG_ON(ep->num_ptds != 0);
205
206         found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
207                                                 num_ptds, 0);
208         if (found >= epq->buf_count)
209                 return -EOVERFLOW;
210
211         DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
212             num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
213         ptd_offset = get_ptd_offset(epq, found);
214         WARN_ON(ptd_offset < 0);
215         ep->ptd_offset = ptd_offset;
216         ep->num_ptds += num_ptds;
217         epq->buf_avail -= num_ptds;
218         BUG_ON(epq->buf_avail > epq->buf_count);
219         ep->ptd_index = found;
220         bitmap_set(&epq->buf_map, found, num_ptds);
221         DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
222             __func__, epq->name, ep->ptd_index, ep->ptd_offset,
223             epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
224
225         return found;
226 }
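/*
 * Worked example (illustrative): a request needing num_ptds = 2 while
 * buf_map = 0b00001011 cannot use index 2 (index 3 is already taken), so
 * bitmap_find_next_zero_area() returns 4; bitmap_set() then marks indices
 * 4 and 5, buf_map becomes 0b00111011, and the PTD is written at
 * buf_start + 4 * blk_size (see get_ptd_offset() above).
 */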
227
228 static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
229 {
230         int index = ep->ptd_index;
231         int last = ep->ptd_index + ep->num_ptds;
232
233         if (last > epq->buf_count)
234                 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
235                     __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
236                     ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
237                     epq->buf_map, epq->skip_map);
238         BUG_ON(last > epq->buf_count);
239
240         for (; index < last; index++) {
241                 __clear_bit(index, &epq->buf_map);
242                 __set_bit(index, &epq->skip_map);
243         }
244         epq->buf_avail += ep->num_ptds;
245         epq->ptd_count--;
246
247         BUG_ON(epq->buf_avail > epq->buf_count);
248         BUG_ON(epq->ptd_count > epq->buf_count);
249
250         DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
251             __func__, epq->name,
252             ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
253         DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
254             epq->buf_map, epq->skip_map);
255
256         ep->num_ptds = 0;
257         ep->ptd_offset = -EINVAL;
258         ep->ptd_index = -EINVAL;
259 }
260
261 /*-------------------------------------------------------------------------*/
262
263 /*
264   Set up PTDs.
265 */
266 static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
267                         struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
268                         u16 fno)
269 {
270         struct ptd *ptd;
271         int toggle;
272         int dir;
273         u16 len;
274         size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
275
276         DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
277
278         ptd = &ep->ptd;
279
280         ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
281
282         switch (ep->nextpid) {
283         case USB_PID_IN:
284                 toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
285                 dir = PTD_DIR_IN;
286                 if (usb_pipecontrol(urb->pipe)) {
287                         len = min_t(size_t, ep->maxpacket, buf_len);
288                 } else if (usb_pipeisoc(urb->pipe)) {
289                         len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
290                         ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
291                 } else
292                         len = max_transfer_size(epq, buf_len, ep->maxpacket);
293                 DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
294                     (int)buf_len);
295                 break;
296         case USB_PID_OUT:
297                 toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
298                 dir = PTD_DIR_OUT;
299                 if (usb_pipecontrol(urb->pipe))
300                         len = min_t(size_t, ep->maxpacket, buf_len);
301                 else if (usb_pipeisoc(urb->pipe))
302                         len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
303                 else
304                         len = max_transfer_size(epq, buf_len, ep->maxpacket);
305                 if (len == 0)
306                         pr_info("%s: Sending ZERO packet: %d\n", __func__,
307                              urb->transfer_flags & URB_ZERO_PACKET);
308                 DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
309                     (int)buf_len);
310                 break;
311         case USB_PID_SETUP:
312                 toggle = 0;
313                 dir = PTD_DIR_SETUP;
314                 len = sizeof(struct usb_ctrlrequest);
315                 DBG(1, "%s: SETUP len %d\n", __func__, len);
316                 ep->data = urb->setup_packet;
317                 break;
318         case USB_PID_ACK:
319                 toggle = 1;
320                 len = 0;
321                 dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
322                         PTD_DIR_OUT : PTD_DIR_IN;
323                 DBG(1, "%s: ACK   len %d\n", __func__, len);
324                 break;
325         default:
326                 toggle = dir = len = 0;
327                 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
328                 BUG_ON(1);
329         }
330
331         ep->length = len;
332         if (!len)
333                 ep->data = NULL;
334
335         ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
336         ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
337                 PTD_EP(ep->epnum);
338         ptd->len = PTD_LEN(len) | PTD_DIR(dir);
339         ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
340
341         if (usb_pipeint(urb->pipe)) {
342                 ptd->faddr |= PTD_SF_INT(ep->branch);
343                 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
344         }
345         if (usb_pipeisoc(urb->pipe))
346                 ptd->faddr |= PTD_SF_ISO(fno);
347
348         DBG(1, "%s: Finished\n", __func__);
349 }
350
351 static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
352                               struct isp1362_ep_queue *epq)
353 {
354         struct ptd *ptd = &ep->ptd;
355         int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
356
357         _BUG_ON(ep->ptd_offset < 0);
358
359         prefetch(ptd);
360         isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
361         if (len)
362                 isp1362_write_buffer(isp1362_hcd, ep->data,
363                                      ep->ptd_offset + PTD_HEADER_SIZE, len);
364
365         dump_ptd(ptd);
366         dump_ptd_out_data(ptd, ep->data);
367 }
368
369 static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
370                              struct isp1362_ep_queue *epq)
371 {
372         struct ptd *ptd = &ep->ptd;
373         int act_len;
374
375         WARN_ON(list_empty(&ep->active));
376         BUG_ON(ep->ptd_offset < 0);
377
378         list_del_init(&ep->active);
379         DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
380
381         prefetchw(ptd);
382         isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
383         dump_ptd(ptd);
384         act_len = PTD_GET_COUNT(ptd);
385         if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
386                 return;
387         if (act_len > ep->length)
388                 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
389                          ep->ptd_offset, act_len, ep->length);
390         BUG_ON(act_len > ep->length);
391         /* Only transfer the amount of data that has actually been overwritten
392          * in the chip buffer. We don't want any data that doesn't belong to the
393          * transfer to leak out of the chip to the caller's transfer buffer!
394          */
395         prefetchw(ep->data);
396         isp1362_read_buffer(isp1362_hcd, ep->data,
397                             ep->ptd_offset + PTD_HEADER_SIZE, act_len);
398         dump_ptd_in_data(ptd, ep->data);
399 }
400
401 /*
402  * INT PTDs will stay in the chip until data is available.
403  * This function will remove a PTD from the chip when the URB is dequeued.
404  * Must be called with the spinlock held and IRQs disabled
405  */
406 static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
408 {
409         int index;
410         struct isp1362_ep_queue *epq;
411
412         DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
413         BUG_ON(ep->ptd_offset < 0);
414
415         epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
416         BUG_ON(!epq);
417
418         /* put ep in remove_list for cleanup */
419         WARN_ON(!list_empty(&ep->remove_list));
420         list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
421         /* let SOF interrupt handle the cleanup */
422         isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
423
424         index = ep->ptd_index;
425         if (index < 0)
426                 /* ISO queues don't have SKIP registers */
427                 return;
428
429         DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
430             index, ep->ptd_offset, epq->skip_map, 1 << index);
431
432         /* prevent further processing of PTD (will be effective after next SOF) */
433         epq->skip_map |= 1 << index;
434         if (epq == &isp1362_hcd->atl_queue) {
435                 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
436                     isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
437                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
438                 if (~epq->skip_map == 0)
439                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
440         } else if (epq == &isp1362_hcd->intl_queue) {
441                 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
442                     isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
443                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
444                 if (~epq->skip_map == 0)
445                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
446         }
447 }
448
449 /*
450   Take done or failed requests out of schedule. Give back
451   processed urbs.
452 */
453 static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
454                            struct urb *urb, int status)
455      __releases(isp1362_hcd->lock)
456      __acquires(isp1362_hcd->lock)
457 {
458         urb->hcpriv = NULL;
459         ep->error_count = 0;
460
461         if (usb_pipecontrol(urb->pipe))
462                 ep->nextpid = USB_PID_SETUP;
463
464         URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
465                 ep->num_req, usb_pipedevice(urb->pipe),
466                 usb_pipeendpoint(urb->pipe),
467                 !usb_pipein(urb->pipe) ? "out" : "in",
468                 usb_pipecontrol(urb->pipe) ? "ctrl" :
469                         usb_pipeint(urb->pipe) ? "int" :
470                         usb_pipebulk(urb->pipe) ? "bulk" :
471                         "iso",
472                 urb->actual_length, urb->transfer_buffer_length,
473                 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
474                 "short_ok" : "", urb->status);
475
476
477         usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
478         spin_unlock(&isp1362_hcd->lock);
479         usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
480         spin_lock(&isp1362_hcd->lock);
481
482         /* take idle endpoints out of the schedule right away */
483         if (!list_empty(&ep->hep->urb_list))
484                 return;
485
486         /* async deschedule */
487         if (!list_empty(&ep->schedule)) {
488                 list_del_init(&ep->schedule);
489                 return;
490         }
491
492
493         if (ep->interval) {
494                 /* periodic deschedule */
495                 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
496                     ep, ep->branch, ep->load,
497                     isp1362_hcd->load[ep->branch],
498                     isp1362_hcd->load[ep->branch] - ep->load);
499                 isp1362_hcd->load[ep->branch] -= ep->load;
500                 ep->branch = PERIODIC_SIZE;
501         }
502 }
503
504 /*
505  * Analyze transfer results, handle partial transfers and errors
506 */
507 static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
508 {
509         struct urb *urb = get_urb(ep);
510         struct usb_device *udev;
511         struct ptd *ptd;
512         int short_ok;
513         u16 len;
514         int urbstat = -EINPROGRESS;
515         u8 cc;
516
517         DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
518
519         udev = urb->dev;
520         ptd = &ep->ptd;
521         cc = PTD_GET_CC(ptd);
522         if (cc == PTD_NOTACCESSED) {
523                 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
524                     ep->num_req, ptd);
525                 cc = PTD_DEVNOTRESP;
526         }
527
528         short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
529         len = urb->transfer_buffer_length - urb->actual_length;
530
531         /* Data underrun is special. For an allowed underrun we clear
532            the error and continue as normal. For a forbidden underrun
533            we finish the DATA stage immediately, except for control
534            transfers, where we still carry out the STATUS stage and
535            report the saved error afterwards.
536         */
537         if (cc == PTD_DATAUNDERRUN) {
538                 if (short_ok) {
539                         DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
540                             __func__, ep->num_req, short_ok ? "" : "not_",
541                             PTD_GET_COUNT(ptd), ep->maxpacket, len);
542                         cc = PTD_CC_NOERROR;
543                         urbstat = 0;
544                 } else {
545                         DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
546                             __func__, ep->num_req,
547                             usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
548                             short_ok ? "" : "not_",
549                             PTD_GET_COUNT(ptd), ep->maxpacket, len);
550                         if (usb_pipecontrol(urb->pipe)) {
551                                 ep->nextpid = USB_PID_ACK;
552                                 /* save the data underrun error code for later and
553                                  * proceed with the status stage
554                                  */
555                                 urb->actual_length += PTD_GET_COUNT(ptd);
556                                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
557
558                                 if (urb->status == -EINPROGRESS)
559                                         urb->status = cc_to_error[PTD_DATAUNDERRUN];
560                         } else {
561                                 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
562                                               PTD_GET_TOGGLE(ptd));
563                                 urbstat = cc_to_error[PTD_DATAUNDERRUN];
564                         }
565                         goto out;
566                 }
567         }
568
569         if (cc != PTD_CC_NOERROR) {
570                 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
571                         urbstat = cc_to_error[cc];
572                         DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
573                             __func__, ep->num_req, ep->nextpid, urbstat, cc,
574                             ep->error_count);
575                 }
576                 goto out;
577         }
578
579         switch (ep->nextpid) {
580         case USB_PID_OUT:
581                 if (PTD_GET_COUNT(ptd) != ep->length)
582                         pr_err("%s: count=%d len=%d\n", __func__,
583                            PTD_GET_COUNT(ptd), ep->length);
584                 BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
585                 urb->actual_length += ep->length;
586                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
587                 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
588                 if (urb->actual_length == urb->transfer_buffer_length) {
589                         DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
590                             ep->num_req, len, ep->maxpacket, urbstat);
591                         if (usb_pipecontrol(urb->pipe)) {
592                                 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
593                                     ep->num_req,
594                                     usb_pipein(urb->pipe) ? "IN" : "OUT");
595                                 ep->nextpid = USB_PID_ACK;
596                         } else {
597                                 if (len % ep->maxpacket ||
598                                     !(urb->transfer_flags & URB_ZERO_PACKET)) {
599                                         urbstat = 0;
600                                         DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
601                                             __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
602                                             urbstat, len, ep->maxpacket, urb->actual_length);
603                                 }
604                         }
605                 }
606                 break;
607         case USB_PID_IN:
608                 len = PTD_GET_COUNT(ptd);
609                 BUG_ON(len > ep->length);
610                 urb->actual_length += len;
611                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
612                 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
613                 /* if transfer completed or (allowed) data underrun */
614                 if ((urb->transfer_buffer_length == urb->actual_length) ||
615                     len % ep->maxpacket) {
616                         DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
617                             ep->num_req, len, ep->maxpacket, urbstat);
618                         if (usb_pipecontrol(urb->pipe)) {
619                                 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
620                                     ep->num_req,
621                                     usb_pipein(urb->pipe) ? "IN" : "OUT");
622                                 ep->nextpid = USB_PID_ACK;
623                         } else {
624                                 urbstat = 0;
625                                 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
626                                     __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
627                                     urbstat, len, ep->maxpacket, urb->actual_length);
628                         }
629                 }
630                 break;
631         case USB_PID_SETUP:
632                 if (urb->transfer_buffer_length == urb->actual_length) {
633                         ep->nextpid = USB_PID_ACK;
634                 } else if (usb_pipeout(urb->pipe)) {
635                         usb_settoggle(udev, 0, 1, 1);
636                         ep->nextpid = USB_PID_OUT;
637                 } else {
638                         usb_settoggle(udev, 0, 0, 1);
639                         ep->nextpid = USB_PID_IN;
640                 }
641                 break;
642         case USB_PID_ACK:
643                 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
644                     urbstat);
645                 WARN_ON(urbstat != -EINPROGRESS);
646                 urbstat = 0;
647                 ep->nextpid = 0;
648                 break;
649         default:
650                 BUG_ON(1);
651         }
652
653  out:
654         if (urbstat != -EINPROGRESS) {
655                 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
656                     ep, ep->num_req, urb, urbstat);
657                 finish_request(isp1362_hcd, ep, urb, urbstat);
658         }
659 }
660
661 static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
662 {
663         struct isp1362_ep *ep;
664         struct isp1362_ep *tmp;
665
666         list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
667                 struct isp1362_ep_queue *epq =
668                         get_ptd_queue(isp1362_hcd, ep->ptd_offset);
669                 int index = ep->ptd_index;
670
671                 BUG_ON(epq == NULL);
672                 if (index >= 0) {
673                         DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
674                         BUG_ON(ep->num_ptds == 0);
675                         release_ptd_buffers(epq, ep);
676                 }
677                 if (!list_empty(&ep->hep->urb_list)) {
678                         struct urb *urb = get_urb(ep);
679
680                         DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
681                             ep->num_req, ep);
682                         finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
683                 }
684                 WARN_ON(list_empty(&ep->active));
685                 if (!list_empty(&ep->active)) {
686                         list_del_init(&ep->active);
687                         DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
688                 }
689                 list_del_init(&ep->remove_list);
690                 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
691         }
692         DBG(1, "%s: Done\n", __func__);
693 }
694
695 static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
696 {
697         if (count > 0) {
698                 if (count < isp1362_hcd->atl_queue.ptd_count)
699                         isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
700                 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
701                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
702                 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
703         } else
704                 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
705 }
706
707 static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
708 {
709         isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
710         isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
711         isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
712 }
713
714 static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
715 {
716         isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
717         isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
718                            HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
719 }
720
721 static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
722                       struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
723 {
724         int index = epq->free_ptd;
725
726         prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
727         index = claim_ptd_buffers(epq, ep, ep->length);
728         if (index == -ENOMEM) {
729                 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
730                     ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
731                 return index;
732         } else if (index == -EOVERFLOW) {
733                 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
734                     __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
735                     epq->buf_map, epq->skip_map);
736                 return index;
737         } else
738                 BUG_ON(index < 0);
739         list_add_tail(&ep->active, &epq->active);
740         DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
741             ep, ep->num_req, ep->length, &epq->active);
742         DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
743             ep->ptd_offset, ep, ep->num_req);
744         isp1362_write_ptd(isp1362_hcd, ep, epq);
745         __clear_bit(ep->ptd_index, &epq->skip_map);
746
747         return 0;
748 }
749
750 static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
751 {
752         int ptd_count = 0;
753         struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
754         struct isp1362_ep *ep;
755         int defer = 0;
756
757         if (atomic_read(&epq->finishing)) {
758                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
759                 return;
760         }
761
762         list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
763                 struct urb *urb = get_urb(ep);
764                 int ret;
765
766                 if (!list_empty(&ep->active)) {
767                         DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
768                         continue;
769                 }
770
771                 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
772                     ep, ep->num_req);
773
774                 ret = submit_req(isp1362_hcd, urb, ep, epq);
775                 if (ret == -ENOMEM) {
776                         defer = 1;
777                         break;
778                 } else if (ret == -EOVERFLOW) {
779                         defer = 1;
780                         continue;
781                 }
782 #ifdef BUGGY_PXA2XX_UDC_USBTEST
783                 defer = ep->nextpid == USB_PID_SETUP;
784 #endif
785                 ptd_count++;
786         }
787
788         /* Avoid starvation of endpoints */
789         if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
790                 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
791                 list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
792         }
793         if (ptd_count || defer)
794                 enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
795
796         epq->ptd_count += ptd_count;
797         if (epq->ptd_count > epq->stat_maxptds) {
798                 epq->stat_maxptds = epq->ptd_count;
799                 DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
800         }
801 }
802
803 static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
804 {
805         int ptd_count = 0;
806         struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
807         struct isp1362_ep *ep;
808
809         if (atomic_read(&epq->finishing)) {
810                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
811                 return;
812         }
813
814         list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
815                 struct urb *urb = get_urb(ep);
816                 int ret;
817
818                 if (!list_empty(&ep->active)) {
819                         DBG(1, "%s: Skipping active %s ep %p\n", __func__,
820                             epq->name, ep);
821                         continue;
822                 }
823
824                 DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
825                     epq->name, ep, ep->num_req);
826                 ret = submit_req(isp1362_hcd, urb, ep, epq);
827                 if (ret == -ENOMEM)
828                         break;
829                 else if (ret == -EOVERFLOW)
830                         continue;
831                 ptd_count++;
832         }
833
834         if (ptd_count) {
835                 static int last_count;
836
837                 if (ptd_count != last_count) {
838                         DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
839                         last_count = ptd_count;
840                 }
841                 enable_intl_transfers(isp1362_hcd);
842         }
843
844         epq->ptd_count += ptd_count;
845         if (epq->ptd_count > epq->stat_maxptds)
846                 epq->stat_maxptds = epq->ptd_count;
847 }
848
849 static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
850 {
851         u16 ptd_offset = ep->ptd_offset;
852         int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
853
854         DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
855             ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
856
857         ptd_offset += num_ptds * epq->blk_size;
858         if (ptd_offset < epq->buf_start + epq->buf_size)
859                 return ptd_offset;
860         else
861                 return -ENOMEM;
862 }
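/*
 * Worked example (illustrative): for ep->length 100 with blk_size 64 and
 * PTD_HEADER_SIZE 8, num_ptds = (100 + 8 + 63) / 64 = 2, so the next ISO PTD
 * would start 2 * 64 = 128 bytes further into the ISTL buffer; -ENOMEM is
 * returned once that offset no longer lies inside the buffer.
 */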
863
864 static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
865 {
866         int ptd_count = 0;
867         int flip = isp1362_hcd->istl_flip;
868         struct isp1362_ep_queue *epq;
869         int ptd_offset;
870         struct isp1362_ep *ep;
871         struct isp1362_ep *tmp;
872         u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
873
874  fill2:
875         epq = &isp1362_hcd->istl_queue[flip];
876         if (atomic_read(&epq->finishing)) {
877                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
878                 return;
879         }
880
881         if (!list_empty(&epq->active))
882                 return;
883
884         ptd_offset = epq->buf_start;
885         list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
886                 struct urb *urb = get_urb(ep);
887                 s16 diff = fno - (u16)urb->start_frame;
888
889                 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
890
891                 if (diff > urb->number_of_packets) {
892                         /* time frame for this URB has elapsed */
893                         finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
894                         continue;
895                 } else if (diff < -1) {
896                         /* URB is not due in this frame or the next one.
897                          * Comparing with '-1' instead of '0' accounts for double
898                          * buffering in the ISP1362 which enables us to queue the PTD
899                          * one frame ahead of time
900                          */
901                 } else if (diff == -1) {
902                         /* submit PTDs that are due in the next frame */
903                         prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
904                         if (ptd_offset + PTD_HEADER_SIZE + ep->length >
905                             epq->buf_start + epq->buf_size) {
906                                 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
907                                     __func__, ep->length);
908                                 continue;
909                         }
910                         ep->ptd_offset = ptd_offset;
911                         list_add_tail(&ep->active, &epq->active);
912
913                         ptd_offset = next_ptd(epq, ep);
914                         if (ptd_offset < 0) {
915                                 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
916                                      ep->num_req, epq->name);
917                                 break;
918                         }
919                 }
920         }
921         list_for_each_entry(ep, &epq->active, active) {
922                 if (epq->active.next == &ep->active)
923                         ep->ptd.mps |= PTD_LAST_MSK;
924                 isp1362_write_ptd(isp1362_hcd, ep, epq);
925                 ptd_count++;
926         }
927
928         if (ptd_count)
929                 enable_istl_transfers(isp1362_hcd, flip);
930
931         epq->ptd_count += ptd_count;
932         if (epq->ptd_count > epq->stat_maxptds)
933                 epq->stat_maxptds = epq->ptd_count;
934
935         /* check whether the second ISTL buffer can also be filled */
936         if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
937               (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
938                 fno++;
939                 ptd_count = 0;
940                 flip = 1 - flip;
941                 goto fill2;
942         }
943 }
944
945 static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
946                              struct isp1362_ep_queue *epq)
947 {
948         struct isp1362_ep *ep;
949         struct isp1362_ep *tmp;
950
951         if (list_empty(&epq->active)) {
952                 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
953                 return;
954         }
955
956         DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
957
958         atomic_inc(&epq->finishing);
959         list_for_each_entry_safe(ep, tmp, &epq->active, active) {
960                 int index = ep->ptd_index;
961
962                 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
963                     index, ep->ptd_offset);
964
965                 BUG_ON(index < 0);
966                 if (__test_and_clear_bit(index, &done_map)) {
967                         isp1362_read_ptd(isp1362_hcd, ep, epq);
968                         epq->free_ptd = index;
969                         BUG_ON(ep->num_ptds == 0);
970                         release_ptd_buffers(epq, ep);
971
972                         DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
973                             ep, ep->num_req);
974                         if (!list_empty(&ep->remove_list)) {
975                                 list_del_init(&ep->remove_list);
976                                 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
977                         }
978                         DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
979                             ep, ep->num_req);
980                         postproc_ep(isp1362_hcd, ep);
981                 }
982                 if (!done_map)
983                         break;
984         }
985         if (done_map)
986                 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
987                      epq->skip_map);
988         atomic_dec(&epq->finishing);
989 }
990
991 static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
992 {
993         struct isp1362_ep *ep;
994         struct isp1362_ep *tmp;
995
996         if (list_empty(&epq->active)) {
997                 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
998                 return;
999         }
1000
1001         DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
1002
1003         atomic_inc(&epq->finishing);
1004         list_for_each_entry_safe(ep, tmp, &epq->active, active) {
1005                 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
1006
1007                 isp1362_read_ptd(isp1362_hcd, ep, epq);
1008                 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1009                 postproc_ep(isp1362_hcd, ep);
1010         }
1011         WARN_ON(epq->blk_size != 0);
1012         atomic_dec(&epq->finishing);
1013 }
1014
1015 static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1016 {
1017         int handled = 0;
1018         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1019         u16 irqstat;
1020         u16 svc_mask;
1021
1022         spin_lock(&isp1362_hcd->lock);
1023
1024         BUG_ON(isp1362_hcd->irq_active++);
1025
1026         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1027
1028         irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1029         DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1030
1031         /* only handle interrupts that are currently enabled */
1032         irqstat &= isp1362_hcd->irqenb;
1033         isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1034         svc_mask = irqstat;
1035
1036         if (irqstat & HCuPINT_SOF) {
1037                 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1038                 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1039                 handled = 1;
1040                 svc_mask &= ~HCuPINT_SOF;
1041                 DBG(3, "%s: SOF\n", __func__);
1042                 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1043                 if (!list_empty(&isp1362_hcd->remove_list))
1044                         finish_unlinks(isp1362_hcd);
1045                 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1046                         if (list_empty(&isp1362_hcd->atl_queue.active)) {
1047                                 start_atl_transfers(isp1362_hcd);
1048                         } else {
1049                                 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1050                                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1051                                                     isp1362_hcd->atl_queue.skip_map);
1052                                 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1053                         }
1054                 }
1055         }
1056
1057         if (irqstat & HCuPINT_ISTL0) {
1058                 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1059                 handled = 1;
1060                 svc_mask &= ~HCuPINT_ISTL0;
1061                 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1062                 DBG(1, "%s: ISTL0\n", __func__);
1063                 WARN_ON((int)!!isp1362_hcd->istl_flip);
1064                 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1065                         HCBUFSTAT_ISTL0_ACTIVE);
1066                 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1067                         HCBUFSTAT_ISTL0_DONE));
1068                 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1069         }
1070
1071         if (irqstat & HCuPINT_ISTL1) {
1072                 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1073                 handled = 1;
1074                 svc_mask &= ~HCuPINT_ISTL1;
1075                 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1076                 DBG(1, "%s: ISTL1\n", __func__);
1077                 WARN_ON(!(int)isp1362_hcd->istl_flip);
1078                 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1079                         HCBUFSTAT_ISTL1_ACTIVE);
1080                 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1081                         HCBUFSTAT_ISTL1_DONE));
1082                 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1083         }
1084
1085         if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1086                 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1087                         (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1088                 finish_iso_transfers(isp1362_hcd,
1089                                      &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1090                 start_iso_transfers(isp1362_hcd);
1091                 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1092         }
1093
1094         if (irqstat & HCuPINT_INTL) {
1095                 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1096                 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1097                 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1098
1099                 DBG(2, "%s: INTL\n", __func__);
1100
1101                 svc_mask &= ~HCuPINT_INTL;
1102
1103                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1104                 if (~(done_map | skip_map) == 0)
1105                         /* All PTDs are finished, disable INTL processing entirely */
1106                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1107
1108                 handled = 1;
1109                 WARN_ON(!done_map);
1110                 if (done_map) {
1111                         DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1112                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1113                         start_intl_transfers(isp1362_hcd);
1114                 }
1115         }
1116
1117         if (irqstat & HCuPINT_ATL) {
1118                 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1119                 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1120                 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1121
1122                 DBG(2, "%s: ATL\n", __func__);
1123
1124                 svc_mask &= ~HCuPINT_ATL;
1125
1126                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1127                 if (~(done_map | skip_map) == 0)
1128                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1129                 if (done_map) {
1130                         DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1131                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1132                         start_atl_transfers(isp1362_hcd);
1133                 }
1134                 handled = 1;
1135         }
1136
1137         if (irqstat & HCuPINT_OPR) {
1138                 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1139                 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1140
1141                 svc_mask &= ~HCuPINT_OPR;
1142                 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1143                 intstat &= isp1362_hcd->intenb;
1144                 if (intstat & OHCI_INTR_UE) {
1145                         pr_err("Unrecoverable error\n");
1146                         /* FIXME: do here reset or cleanup or whatever */
1147                 }
1148                 if (intstat & OHCI_INTR_RHSC) {
1149                         isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1150                         isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1151                         isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1152                 }
1153                 if (intstat & OHCI_INTR_RD) {
1154                         pr_info("%s: RESUME DETECTED\n", __func__);
1155                         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1156                         usb_hcd_resume_root_hub(hcd);
1157                 }
1158                 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1159                 irqstat &= ~HCuPINT_OPR;
1160                 handled = 1;
1161         }
1162
1163         if (irqstat & HCuPINT_SUSP) {
1164                 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1165                 handled = 1;
1166                 svc_mask &= ~HCuPINT_SUSP;
1167
1168                 pr_info("%s: SUSPEND IRQ\n", __func__);
1169         }
1170
1171         if (irqstat & HCuPINT_CLKRDY) {
1172                 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1173                 handled = 1;
1174                 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1175                 svc_mask &= ~HCuPINT_CLKRDY;
1176                 pr_info("%s: CLKRDY IRQ\n", __func__);
1177         }
1178
1179         if (svc_mask)
1180                 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1181
1182         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1183         isp1362_hcd->irq_active--;
1184         spin_unlock(&isp1362_hcd->lock);
1185
1186         return IRQ_RETVAL(handled);
1187 }
1188
1189 /*-------------------------------------------------------------------------*/
1190
1191 #define MAX_PERIODIC_LOAD       900     /* out of 1000 usec */
1192 static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1193 {
1194         int i, branch = -ENOSPC;
1195
1196         /* search for the least loaded schedule branch of that interval
1197          * which has enough bandwidth left unreserved.
1198          */
1199         for (i = 0; i < interval; i++) {
1200                 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1201                         int j;
1202
1203                         for (j = i; j < PERIODIC_SIZE; j += interval) {
1204                                 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1205                                         pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1206                                             load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1207                                         break;
1208                                 }
1209                         }
1210                         if (j < PERIODIC_SIZE)
1211                                 continue;
1212                         branch = i;
1213                 }
1214         }
1215         return branch;
1216 }
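/*
 * Worked example (illustrative): for an interrupt endpoint with interval 4
 * and load 100, balance() tries branches 0..3 and, for the least loaded
 * candidate i, verifies that load[i], load[i + 4], load[i + 8], ... can all
 * take the extra 100 without exceeding MAX_PERIODIC_LOAD.  The enqueue path
 * below then stores the chosen branch in ep->branch, and finish_request()
 * above gives the reserved bandwidth back when the endpoint is descheduled.
 */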
1217
1218 /* NB! ALL the code above this point runs with isp1362_hcd->lock
1219    held, irqs off
1220 */
1221
1222 /*-------------------------------------------------------------------------*/
1223
1224 static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1225                                struct urb *urb,
1226                                gfp_t mem_flags)
1227 {
1228         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1229         struct usb_device *udev = urb->dev;
1230         unsigned int pipe = urb->pipe;
1231         int is_out = !usb_pipein(pipe);
1232         int type = usb_pipetype(pipe);
1233         int epnum = usb_pipeendpoint(pipe);
1234         struct usb_host_endpoint *hep = urb->ep;
1235         struct isp1362_ep *ep = NULL;
1236         unsigned long flags;
1237         int retval = 0;
1238
1239         DBG(3, "%s: urb %p\n", __func__, urb);
1240
1241         if (type == PIPE_ISOCHRONOUS) {
1242                 pr_err("Isochronous transfers not supported\n");
1243                 return -ENOSPC;
1244         }
1245
1246         URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1247                 usb_pipedevice(pipe), epnum,
1248                 is_out ? "out" : "in",
1249                 usb_pipecontrol(pipe) ? "ctrl" :
1250                         usb_pipeint(pipe) ? "int" :
1251                         usb_pipebulk(pipe) ? "bulk" :
1252                         "iso",
1253                 urb->transfer_buffer_length,
1254                 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1255                 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1256                 "short_ok" : "");
1257
1258         /* avoid all allocations within spinlocks: request or endpoint */
1259         if (!hep->hcpriv) {
1260                 ep = kzalloc(sizeof *ep, mem_flags);
1261                 if (!ep)
1262                         return -ENOMEM;
1263         }
1264         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1265
1266         /* don't submit to a dead or disabled port */
1267         if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1268               USB_PORT_STAT_ENABLE) ||
1269             !HC_IS_RUNNING(hcd->state)) {
1270                 kfree(ep);
1271                 retval = -ENODEV;
1272                 goto fail_not_linked;
1273         }
1274
1275         retval = usb_hcd_link_urb_to_ep(hcd, urb);
1276         if (retval) {
1277                 kfree(ep);
1278                 goto fail_not_linked;
1279         }
1280
1281         if (hep->hcpriv) {
1282                 ep = hep->hcpriv;
1283         } else {
1284                 INIT_LIST_HEAD(&ep->schedule);
1285                 INIT_LIST_HEAD(&ep->active);
1286                 INIT_LIST_HEAD(&ep->remove_list);
1287                 ep->udev = usb_get_dev(udev);
1288                 ep->hep = hep;
1289                 ep->epnum = epnum;
1290                 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1291                 ep->ptd_offset = -EINVAL;
1292                 ep->ptd_index = -EINVAL;
1293                 usb_settoggle(udev, epnum, is_out, 0);
1294
1295                 if (type == PIPE_CONTROL)
1296                         ep->nextpid = USB_PID_SETUP;
1297                 else if (is_out)
1298                         ep->nextpid = USB_PID_OUT;
1299                 else
1300                         ep->nextpid = USB_PID_IN;
1301
1302                 switch (type) {
1303                 case PIPE_ISOCHRONOUS:
1304                 case PIPE_INTERRUPT:
1305                         if (urb->interval > PERIODIC_SIZE)
1306                                 urb->interval = PERIODIC_SIZE;
1307                         ep->interval = urb->interval;
1308                         ep->branch = PERIODIC_SIZE;
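                             /* usb_calc_bus_time() returns the transfer time in
                              * ns; keep the per-frame load in us for the
                              * bandwidth accounting in isp1362_hcd->load[].
                              */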
1309                         ep->load = usb_calc_bus_time(udev->speed, !is_out,
1310                                                      (type == PIPE_ISOCHRONOUS),
1311                                                      usb_maxpacket(udev, pipe, is_out)) / 1000;
1312                         break;
1313                 }
1314                 hep->hcpriv = ep;
1315         }
1316         ep->num_req = isp1362_hcd->req_serial++;
1317
1318         /* maybe put endpoint into schedule */
1319         switch (type) {
1320         case PIPE_CONTROL:
1321         case PIPE_BULK:
1322                 if (list_empty(&ep->schedule)) {
1323                         DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1324                                 __func__, ep, ep->num_req);
1325                         list_add_tail(&ep->schedule, &isp1362_hcd->async);
1326                 }
1327                 break;
1328         case PIPE_ISOCHRONOUS:
1329         case PIPE_INTERRUPT:
1330                 urb->interval = ep->interval;
1331
1332                 /* urb submitted for already existing EP */
1333                 if (ep->branch < PERIODIC_SIZE)
1334                         break;
1335
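                     /* Pick a branch (frame slot) in the periodic schedule;
                      * balance() should return the least-loaded slot that can
                      * accept ep->load in every frame of this interval.
                      */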
1336                 retval = balance(isp1362_hcd, ep->interval, ep->load);
1337                 if (retval < 0) {
1338                         pr_err("%s: balance returned %d\n", __func__, retval);
1339                         goto fail;
1340                 }
1341                 ep->branch = retval;
1342                 retval = 0;
1343                 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1344                 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1345                     __func__, isp1362_hcd->fmindex, ep->branch,
1346                     ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1347                      ~(PERIODIC_SIZE - 1)) + ep->branch,
1348                     (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1349
1350                 if (list_empty(&ep->schedule)) {
1351                         if (type == PIPE_ISOCHRONOUS) {
1352                                 u16 frame = isp1362_hcd->fmindex;
1353
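                                     /* Start isochronous streaming at least 8
                                      * frames (or one interval) ahead, aligned
                                      * down to the interval and offset by the
                                      * branch; push it out by one more interval
                                      * if it would still lie in the past.
                                      */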
1354                                 frame += max_t(u16, 8, ep->interval);
1355                                 frame &= ~(ep->interval - 1);
1356                                 frame |= ep->branch;
1357                                 if (frame_before(frame, isp1362_hcd->fmindex))
1358                                         frame += ep->interval;
1359                                 urb->start_frame = frame;
1360
1361                                 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1362                                 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1363                         } else {
1364                                 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1365                                 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1366                         }
1367                 } else
1368                         DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1369
1370                 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1371                     ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1372                     isp1362_hcd->load[ep->branch] + ep->load);
1373                 isp1362_hcd->load[ep->branch] += ep->load;
1374         }
1375
1376         urb->hcpriv = hep;
1377         ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1378
1379         switch (type) {
1380         case PIPE_CONTROL:
1381         case PIPE_BULK:
1382                 start_atl_transfers(isp1362_hcd);
1383                 break;
1384         case PIPE_INTERRUPT:
1385                 start_intl_transfers(isp1362_hcd);
1386                 break;
1387         case PIPE_ISOCHRONOUS:
1388                 start_iso_transfers(isp1362_hcd);
1389                 break;
1390         default:
1391                 BUG();
1392         }
1393  fail:
1394         if (retval)
1395                 usb_hcd_unlink_urb_from_ep(hcd, urb);
1396
1397
1398  fail_not_linked:
1399         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1400         if (retval)
1401                 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1402         return retval;
1403 }
1404
1405 static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1406 {
1407         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1408         struct usb_host_endpoint *hep;
1409         unsigned long flags;
1410         struct isp1362_ep *ep;
1411         int retval = 0;
1412
1413         DBG(3, "%s: urb %p\n", __func__, urb);
1414
1415         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1416         retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1417         if (retval)
1418                 goto done;
1419
1420         hep = urb->hcpriv;
1421
1422         if (!hep) {
1423                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1424                 return -EIDRM;
1425         }
1426
1427         ep = hep->hcpriv;
1428         if (ep) {
1429                 /* In front of queue? */
1430                 if (ep->hep->urb_list.next == &urb->urb_list) {
1431                         if (!list_empty(&ep->active)) {
1432                                 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1433                                     urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1434                                 /* disable processing and queue PTD for removal */
1435                                 remove_ptd(isp1362_hcd, ep);
1436                                 urb = NULL;
1437                         }
1438                 }
1439                 if (urb) {
1440                         DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1441                             ep->num_req);
1442                         finish_request(isp1362_hcd, ep, urb, status);
1443                 } else
1444                         DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1445         } else {
1446                 pr_warning("%s: No EP in URB %p\n", __func__, urb);
1447                 retval = -EINVAL;
1448         }
1449 done:
1450         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1451
1452         DBG(3, "%s: exit\n", __func__);
1453
1454         return retval;
1455 }
1456
1457 static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1458 {
1459         struct isp1362_ep *ep = hep->hcpriv;
1460         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1461         unsigned long flags;
1462
1463         DBG(1, "%s: ep %p\n", __func__, ep);
1464         if (!ep)
1465                 return;
1466         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1467         if (!list_empty(&hep->urb_list)) {
1468                 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1469                         DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1470                             ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1471                         remove_ptd(isp1362_hcd, ep);
1472                         pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1473                 }
1474         }
1475         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1476         /* Wait for interrupt to clear out active list */
1477         while (!list_empty(&ep->active))
1478                 msleep(1);
1479
1480         DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1481
1482         usb_put_dev(ep->udev);
1483         kfree(ep);
1484         hep->hcpriv = NULL;
1485 }
1486
1487 static int isp1362_get_frame(struct usb_hcd *hcd)
1488 {
1489         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1490         u32 fmnum;
1491         unsigned long flags;
1492
1493         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1494         fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1495         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1496
1497         return (int)fmnum;
1498 }
1499
1500 /*-------------------------------------------------------------------------*/
1501
1502 /* Adapted from ohci-hub.c */
1503 static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1504 {
1505         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1506         int ports, i, changed = 0;
1507         unsigned long flags;
1508
1509         if (!HC_IS_RUNNING(hcd->state))
1510                 return -ESHUTDOWN;
1511
1512         /* Report no status change now, if we are scheduled to be
1513            called later */
1514         if (timer_pending(&hcd->rh_timer))
1515                 return 0;
1516
1517         ports = isp1362_hcd->rhdesca & RH_A_NDP;
1518         BUG_ON(ports > 2);
1519
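             /* Build the hub status-change bitmap: bit 0 reports hub-level
              * changes, bit (port + 1) reports a change on that port.
              */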
1520         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1521         /* init status */
1522         if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1523                 buf[0] = changed = 1;
1524         else
1525                 buf[0] = 0;
1526
1527         for (i = 0; i < ports; i++) {
1528                 u32 status = isp1362_hcd->rhport[i];
1529
1530                 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1531                               RH_PS_OCIC | RH_PS_PRSC)) {
1532                         changed = 1;
1533                         buf[0] |= 1 << (i + 1);
1534                         continue;
1535                 }
1536
1537                 if (!(status & RH_PS_CCS))
1538                         continue;
1539         }
1540         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1541         return changed;
1542 }
1543
1544 static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1545                                    struct usb_hub_descriptor *desc)
1546 {
1547         u32 reg = isp1362_hcd->rhdesca;
1548
1549         DBG(3, "%s: enter\n", __func__);
1550
1551         desc->bDescriptorType = 0x29;
1552         desc->bDescLength = 9;
1553         desc->bHubContrCurrent = 0;
1554         desc->bNbrPorts = reg & 0x3;
1555         /* Power switching, device type, overcurrent. */
1556         desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1557         DBG(0, "%s: hubcharacteristics = %02x\n", __func__, (reg >> 8) & 0x1f);
1558         desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1559         /* two bitmaps:  ports removable, and legacy PortPwrCtrlMask */
1560         desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1561         desc->bitmap[1] = ~0;
1562
1563         DBG(3, "%s: exit\n", __func__);
1564 }
1565
1566 /* Adapted from ohci-hub.c */
1567 static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1568                                u16 wIndex, char *buf, u16 wLength)
1569 {
1570         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1571         int retval = 0;
1572         unsigned long flags;
1573         unsigned long t1;
1574         int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1575         u32 tmp = 0;
1576
1577         switch (typeReq) {
1578         case ClearHubFeature:
1579                 DBG(0, "ClearHubFeature: ");
1580                 switch (wValue) {
1581                 case C_HUB_OVER_CURRENT:
1582                         _DBG(0, "C_HUB_OVER_CURRENT\n");
1583                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1584                         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1585                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
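                             /* fall through */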
1586                 case C_HUB_LOCAL_POWER:
1587                         _DBG(0, "C_HUB_LOCAL_POWER\n");
1588                         break;
1589                 default:
1590                         goto error;
1591                 }
1592                 break;
1593         case SetHubFeature:
1594                 DBG(0, "SetHubFeature: ");
1595                 switch (wValue) {
1596                 case C_HUB_OVER_CURRENT:
1597                 case C_HUB_LOCAL_POWER:
1598                         _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1599                         break;
1600                 default:
1601                         goto error;
1602                 }
1603                 break;
1604         case GetHubDescriptor:
1605                 DBG(0, "GetHubDescriptor\n");
1606                 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1607                 break;
1608         case GetHubStatus:
1609                 DBG(0, "GetHubStatus\n");
1610                 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1611                 break;
1612         case GetPortStatus:
1613 #ifndef VERBOSE
1614                 DBG(0, "GetPortStatus\n");
1615 #endif
1616                 if (!wIndex || wIndex > ports)
1617                         goto error;
1618                 tmp = isp1362_hcd->rhport[--wIndex];
1619                 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1620                 break;
1621         case ClearPortFeature:
1622                 DBG(0, "ClearPortFeature: ");
1623                 if (!wIndex || wIndex > ports)
1624                         goto error;
1625                 wIndex--;
1626
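                     /* Clearing a port feature is done by writing the matching
                      * RH_PS_* bit to HCRHPORTx (standard OHCI root-hub
                      * semantics); pick that bit here.
                      */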
1627                 switch (wValue) {
1628                 case USB_PORT_FEAT_ENABLE:
1629                         _DBG(0, "USB_PORT_FEAT_ENABLE\n");
1630                         tmp = RH_PS_CCS;
1631                         break;
1632                 case USB_PORT_FEAT_C_ENABLE:
1633                         _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1634                         tmp = RH_PS_PESC;
1635                         break;
1636                 case USB_PORT_FEAT_SUSPEND:
1637                         _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1638                         tmp = RH_PS_POCI;
1639                         break;
1640                 case USB_PORT_FEAT_C_SUSPEND:
1641                         _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1642                         tmp = RH_PS_PSSC;
1643                         break;
1644                 case USB_PORT_FEAT_POWER:
1645                         _DBG(0, "USB_PORT_FEAT_POWER\n");
1646                         tmp = RH_PS_LSDA;
1648                         break;
1649                 case USB_PORT_FEAT_C_CONNECTION:
1650                         _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1651                         tmp = RH_PS_CSC;
1652                         break;
1653                 case USB_PORT_FEAT_C_OVER_CURRENT:
1654                         _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1655                         tmp = RH_PS_OCIC;
1656                         break;
1657                 case USB_PORT_FEAT_C_RESET:
1658                         _DBG(0, "USB_PORT_FEAT_C_RESET\n");
1659                         tmp = RH_PS_PRSC;
1660                         break;
1661                 default:
1662                         goto error;
1663                 }
1664
1665                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1666                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1667                 isp1362_hcd->rhport[wIndex] =
1668                         isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1669                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1670                 break;
1671         case SetPortFeature:
1672                 DBG(0, "SetPortFeature: ");
1673                 if (!wIndex || wIndex > ports)
1674                         goto error;
1675                 wIndex--;
1676                 switch (wValue) {
1677                 case USB_PORT_FEAT_SUSPEND:
1678                         _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1679 #ifdef  CONFIG_USB_OTG
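                             /* Carried over from ohci-hub.c: "ohci" and
                              * start_hnp() are not defined in this driver, so
                              * this block does not build with CONFIG_USB_OTG.
                              */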
1680                         if (ohci->hcd.self.otg_port == (wIndex + 1) &&
1681                             ohci->hcd.self.b_hnp_enable) {
1682                                 start_hnp(ohci);
1683                                 break;
1684                         }
1685 #endif
1686                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1687                         isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1688                         isp1362_hcd->rhport[wIndex] =
1689                                 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1690                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1691                         break;
1692                 case USB_PORT_FEAT_POWER:
1693                         _DBG(0, "USB_PORT_FEAT_POWER\n");
1694                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1695                         isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1696                         isp1362_hcd->rhport[wIndex] =
1697                                 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1698                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1699                         break;
1700                 case USB_PORT_FEAT_RESET:
1701                         _DBG(0, "USB_PORT_FEAT_RESET\n");
1702                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1703
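                             /* Keep (re)asserting port reset for up to
                              * USB_RESET_WIDTH ms; stop early once the device
                              * is gone (CCS cleared).
                              */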
1704                         t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1705                         while (time_before(jiffies, t1)) {
1706                                 /* spin until any current reset finishes */
1707                                 for (;;) {
1708                                         tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1709                                         if (!(tmp & RH_PS_PRS))
1710                                                 break;
1711                                         udelay(500);
1712                                 }
1713                                 if (!(tmp & RH_PS_CCS))
1714                                         break;
1715                                 /* Reset lasts 10ms (per the datasheet) */
1716                                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1717
1718                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1719                                 msleep(10);
1720                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1721                         }
1722
1723                         isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1724                                                                          HCRHPORT1 + wIndex);
1725                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1726                         break;
1727                 default:
1728                         goto error;
1729                 }
1730                 break;
1731
1732         default:
1733  error:
1734                 /* "protocol stall" on error */
1735                 _DBG(0, "PROTOCOL STALL\n");
1736                 retval = -EPIPE;
1737         }
1738
1739         return retval;
1740 }
1741
1742 #ifdef  CONFIG_PM
1743 static int isp1362_bus_suspend(struct usb_hcd *hcd)
1744 {
1745         int status = 0;
1746         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1747         unsigned long flags;
1748
1749         if (time_before(jiffies, isp1362_hcd->next_statechange))
1750                 msleep(5);
1751
1752         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1753
1754         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1755         switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1756         case OHCI_USB_RESUME:
1757                 DBG(0, "%s: resume/suspend?\n", __func__);
1758                 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1759                 isp1362_hcd->hc_control |= OHCI_USB_RESET;
1760                 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1761                 /* FALL THROUGH */
1762         case OHCI_USB_RESET:
1763                 status = -EBUSY;
1764                 pr_warning("%s: needs reinit!\n", __func__);
1765                 goto done;
1766         case OHCI_USB_SUSPEND:
1767                 pr_warning("%s: already suspended?\n", __func__);
1768                 goto done;
1769         }
1770         DBG(0, "%s: suspend root hub\n", __func__);
1771
1772         /* First stop any processing */
1773         hcd->state = HC_STATE_QUIESCING;
1774         if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1775             !list_empty(&isp1362_hcd->intl_queue.active) ||
1776             !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1777             !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1778                 int limit;
1779
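                     /* Skip all pending PTDs, stop buffer processing and then
                      * wait for a SOF so transfers already in flight can
                      * complete before the final done maps are collected.
                      */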
1780                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1781                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1782                 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1783                 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1784                 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1785
1786                 DBG(0, "%s: stopping schedules ...\n", __func__);
1787                 limit = 2000;
1788                 while (limit > 0) {
1789                         udelay(250);
1790                         limit -= 250;
1791                         if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1792                                 break;
1793                 }
1794                 mdelay(7);
1795                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1796                         u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1797                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1798                 }
1799                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1800                         u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1801                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1802                 }
1803                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1804                         finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1805                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1806                         finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1807         }
1808         DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1809                     isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1810         isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1811                             isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1812
1813         /* Suspend hub */
1814         isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1815         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1816         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1817         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1818
1819 #if 1
1820         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1821         if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1822                 pr_err("%s: controller won't suspend %08x\n", __func__,
1823                     isp1362_hcd->hc_control);
1824                 status = -EBUSY;
1825         } else
1826 #endif
1827         {
1828                 /* no resumes until devices finish suspending */
1829                 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1830         }
1831 done:
1832         if (status == 0) {
1833                 hcd->state = HC_STATE_SUSPENDED;
1834                 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1835                     isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1836         }
1837         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1838         return status;
1839 }
1840
1841 static int isp1362_bus_resume(struct usb_hcd *hcd)
1842 {
1843         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1844         u32 port;
1845         unsigned long flags;
1846         int status = -EINPROGRESS;
1847
1848         if (time_before(jiffies, isp1362_hcd->next_statechange))
1849                 msleep(5);
1850
1851         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1852         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1853         pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1854         if (hcd->state == HC_STATE_RESUMING) {
1855                 pr_warning("%s: duplicate resume\n", __func__);
1856                 status = 0;
1857         } else
1858                 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1859                 case OHCI_USB_SUSPEND:
1860                         DBG(0, "%s: resume root hub\n", __func__);
1861                         isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1862                         isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1863                         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1864                         break;
1865                 case OHCI_USB_RESUME:
1866                         /* HCFS changes sometime after INTR_RD */
1867                         DBG(0, "%s: remote wakeup\n", __func__);
1868                         break;
1869                 case OHCI_USB_OPER:
1870                         DBG(0, "%s: odd resume\n", __func__);
1871                         status = 0;
1872                         hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1873                         break;
1874                 default:                /* RESET, we lost power */
1875                         DBG(0, "%s: root hub hardware reset\n", __func__);
1876                         status = -EBUSY;
1877                 }
1878         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1879         if (status == -EBUSY) {
1880                 DBG(0, "%s: Restarting HC\n", __func__);
1881                 isp1362_hc_stop(hcd);
1882                 return isp1362_hc_start(hcd);
1883         }
1884         if (status != -EINPROGRESS)
1885                 return status;
1886         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1887         port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1888         while (port--) {
1889                 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1890
1891                 /* force global, not selective, resume */
1892                 if (!(stat & RH_PS_PSS)) {
1893                         DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1894                         continue;
1895                 }
1896                 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1897                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1898         }
1899         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1900
1901         /* Some controllers (Lucent) need extra-long delays */
1902         hcd->state = HC_STATE_RESUMING;
1903         mdelay(20 /* usb 11.5.1.10 */ + 15);
1904
1905         isp1362_hcd->hc_control = OHCI_USB_OPER;
1906         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1907         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1908         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1909         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1910         /* TRSMRCY: 10 ms resume recovery time */
1911         msleep(10);
1912
1913         /* keep it alive for ~5x suspend + resume costs */
1914         isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1915
1916         hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1917         hcd->state = HC_STATE_RUNNING;
1918         return 0;
1919 }
1920 #else
1921 #define isp1362_bus_suspend     NULL
1922 #define isp1362_bus_resume      NULL
1923 #endif
1924
1925 /*-------------------------------------------------------------------------*/
1926
1927 #ifdef STUB_DEBUG_FILE
1928
1929 static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
1930 {
1931 }
1932 static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
1933 {
1934 }
1935
1936 #else
1937
1938 #include <linux/proc_fs.h>
1939 #include <linux/seq_file.h>
1940
1941 static void dump_irq(struct seq_file *s, char *label, u16 mask)
1942 {
1943         seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1944                    mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1945                    mask & HCuPINT_SUSP ? " susp" : "",
1946                    mask & HCuPINT_OPR ? " opr" : "",
1947                    mask & HCuPINT_EOT ? " eot" : "",
1948                    mask & HCuPINT_ATL ? " atl" : "",
1949                    mask & HCuPINT_SOF ? " sof" : "");
1950 }
1951
1952 static void dump_int(struct seq_file *s, char *label, u32 mask)
1953 {
1954         seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1955                    mask & OHCI_INTR_MIE ? " MIE" : "",
1956                    mask & OHCI_INTR_RHSC ? " rhsc" : "",
1957                    mask & OHCI_INTR_FNO ? " fno" : "",
1958                    mask & OHCI_INTR_UE ? " ue" : "",
1959                    mask & OHCI_INTR_RD ? " rd" : "",
1960                    mask & OHCI_INTR_SF ? " sof" : "",
1961                    mask & OHCI_INTR_SO ? " so" : "");
1962 }
1963
1964 static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1965 {
1966         seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1967                    mask & OHCI_CTRL_RWC ? " rwc" : "",
1968                    mask & OHCI_CTRL_RWE ? " rwe" : "",
1969                    ({
1970                            char *hcfs;
1971                            switch (mask & OHCI_CTRL_HCFS) {
1972                            case OHCI_USB_OPER:
1973                                    hcfs = " oper";
1974                                    break;
1975                            case OHCI_USB_RESET:
1976                                    hcfs = " reset";
1977                                    break;
1978                            case OHCI_USB_RESUME:
1979                                    hcfs = " resume";
1980                                    break;
1981                            case OHCI_USB_SUSPEND:
1982                                    hcfs = " suspend";
1983                                    break;
1984                            default:
1985                                    hcfs = " ?";
1986                            }
1987                            hcfs;
1988                    }));
1989 }
1990
1991 static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1992 {
1993         seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1994                    isp1362_read_reg32(isp1362_hcd, HCREVISION));
1995         seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1996                    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1997         seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1998                    isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1999         seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
2000                    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2001         seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
2002                    isp1362_read_reg32(isp1362_hcd, HCINTENB));
2003         seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
2004                    isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
2005         seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
2006                    isp1362_read_reg32(isp1362_hcd, HCFMREM));
2007         seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
2008                    isp1362_read_reg32(isp1362_hcd, HCFMNUM));
2009         seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
2010                    isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
2011         seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
2012                    isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
2013         seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
2014                    isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
2015         seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
2016                    isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
2017         seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
2018                    isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
2019         seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
2020                    isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
2021         seq_printf(s, "\n");
2022         seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
2023                    isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2024         seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2025                    isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2026         seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2027                    isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2028         seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2029                    isp1362_read_reg16(isp1362_hcd, HCuPINT));
2030         seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2031                    isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2032         seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2033                    isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2034         seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2035                    isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2036         seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2037                    isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2038         seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2039                    isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2040 #if 0
2041         seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2042                    isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2043 #endif
2044         seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2045                    isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2046         seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2047                    isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2048         seq_printf(s, "\n");
2049         seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2050                    isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2051         seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2052                    isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2053         seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2054                    isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2055         seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2056                    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2057         seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2058                    isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2059         seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2060                    isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2061         seq_printf(s, "\n");
2062         seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2063                    isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2064         seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2065                    isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2066 #if 0
2067         seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2068                    isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2069 #endif
2070         seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2071                    isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2072         seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2073                    isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2074         seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2075                    isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2076         seq_printf(s, "\n");
2077         seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2078                    isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2079         seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2080                    isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2081 }
2082
2083 static int proc_isp1362_show(struct seq_file *s, void *unused)
2084 {
2085         struct isp1362_hcd *isp1362_hcd = s->private;
2086         struct isp1362_ep *ep;
2087         int i;
2088
2089         seq_printf(s, "%s\n%s version %s\n",
2090                    isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2091
2092         /* collect statistics to help estimate potential win for
2093          * DMA engines that care about alignment (PXA)
2094          */
2095         seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2096                    isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2097                    isp1362_hcd->stat2, isp1362_hcd->stat1);
2098         seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2099         seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2100         seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2101                    max(isp1362_hcd->istl_queue[0].stat_maxptds,
2102                        isp1362_hcd->istl_queue[1].stat_maxptds));
2103
2104         /* FIXME: don't show the following in suspended state */
2105         spin_lock_irq(&isp1362_hcd->lock);
2106
2107         dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2108         dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2109         dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2110         dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2111         dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2112
2113         for (i = 0; i < NUM_ISP1362_IRQS; i++)
2114                 if (isp1362_hcd->irq_stat[i])
2115                         seq_printf(s, "%-15s: %d\n",
2116                                    ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2117
2118         dump_regs(s, isp1362_hcd);
2119         list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2120                 struct urb *urb;
2121
2122                 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2123                            ({
2124                                    char *s;
2125                                    switch (ep->nextpid) {
2126                                    case USB_PID_IN:
2127                                            s = "in";
2128                                            break;
2129                                    case USB_PID_OUT:
2130                                            s = "out";
2131                                            break;
2132                                    case USB_PID_SETUP:
2133                                            s = "setup";
2134                                            break;
2135                                    case USB_PID_ACK:
2136                                            s = "status";
2137                                            break;
2138                                    default:
2139                                            s = "?";
2140                                            break;
2141                                    }
2142                                    s;}), ep->maxpacket);
2143                 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2144                         seq_printf(s, "  urb%p, %d/%d\n", urb,
2145                                    urb->actual_length,
2146                                    urb->transfer_buffer_length);
2147                 }
2148         }
2149         if (!list_empty(&isp1362_hcd->async))
2150                 seq_printf(s, "\n");
2151         dump_ptd_queue(&isp1362_hcd->atl_queue);
2152
2153         seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2154
2155         list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2156                 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2157                            isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2158
2159                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2160                            ep->interval, ep,
2161                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2162                            ep->udev->devnum, ep->epnum,
2163                            (ep->epnum == 0) ? "" :
2164                            ((ep->nextpid == USB_PID_IN) ?
2165                             "in" : "out"), ep->maxpacket);
2166         }
2167         dump_ptd_queue(&isp1362_hcd->intl_queue);
2168
2169         seq_printf(s, "ISO:\n");
2170
2171         list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2172                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2173                            ep->interval, ep,
2174                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2175                            ep->udev->devnum, ep->epnum,
2176                            (ep->epnum == 0) ? "" :
2177                            ((ep->nextpid == USB_PID_IN) ?
2178                             "in" : "out"), ep->maxpacket);
2179         }
2180
2181         spin_unlock_irq(&isp1362_hcd->lock);
2182         seq_printf(s, "\n");
2183
2184         return 0;
2185 }
2186
2187 static int proc_isp1362_open(struct inode *inode, struct file *file)
2188 {
2189         return single_open(file, proc_isp1362_show, PDE(inode)->data);
2190 }
2191
2192 static const struct file_operations proc_ops = {
2193         .open = proc_isp1362_open,
2194         .read = seq_read,
2195         .llseek = seq_lseek,
2196         .release = single_release,
2197 };
2198
2199 /* expect just one isp1362_hcd per system */
2200 static const char proc_filename[] = "driver/isp1362";
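     /* The snapshot can be read with e.g. "cat /proc/driver/isp1362". */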
2201
2202 static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2203 {
2204         struct proc_dir_entry *pde;
2205
2206         pde = create_proc_entry(proc_filename, 0, NULL);
2207         if (pde == NULL) {
2208                 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2209                 return;
2210         }
2211
2212         pde->proc_fops = &proc_ops;
2213         pde->data = isp1362_hcd;
2214         isp1362_hcd->pde = pde;
2215 }
2216
2217 static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2218 {
2219         if (isp1362_hcd->pde)
2220                 remove_proc_entry(proc_filename, NULL);
2221 }
2222
2223 #endif
2224
2225 /*-------------------------------------------------------------------------*/
2226
2227 static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2228 {
2229         int tmp = 20;
2230         unsigned long flags;
2231
2232         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2233
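             /* Write the magic value to HCSWRES and request an OHCI host
              * controller reset; HCCMDSTAT.HCR self-clears once the reset
              * has completed (polled below for up to ~20 ms).
              */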
2234         isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2235         isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2236         while (--tmp) {
2237                 mdelay(1);
2238                 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2239                         break;
2240         }
2241         if (!tmp)
2242                 pr_err("Software reset timeout\n");
2243         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2244 }
2245
2246 static int isp1362_mem_config(struct usb_hcd *hcd)
2247 {
2248         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2249         unsigned long flags;
2250         u32 total;
2251         u16 istl_size = ISP1362_ISTL_BUFSIZE;
2252         u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2253         u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2254         u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2255         u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2256         u16 atl_size;
2257         int i;
2258
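             /*
              * Partition the on-chip buffer memory: two ISTL ping-pong
              * buffers at the bottom, ISP1362_INTL_BUFFERS fixed-size INTL
              * blocks above them, and as many ATL blocks as fit into the
              * remaining space (capped at 32, the width of the skip/done
              * maps).
              */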
2259         WARN_ON(istl_size & 3);
2260         WARN_ON(atl_blksize & 3);
2261         WARN_ON(intl_blksize & 3);
2262         WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2263         WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2264
2265         BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2266         if (atl_buffers > 32)
2267                 atl_buffers = 32;
2268         atl_size = atl_buffers * atl_blksize;
2269         total = atl_size + intl_size + istl_size;
2270         dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2271         dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2272                  istl_size / 2, istl_size, 0, istl_size / 2);
2273         dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2274                  ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2275                  intl_size, istl_size);
2276         dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2277                  atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2278                  atl_size, istl_size + intl_size);
2279         dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2280                  ISP1362_BUF_SIZE - total);
2281
2282         if (total > ISP1362_BUF_SIZE) {
2283                 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2284                         __func__, total, ISP1362_BUF_SIZE);
2285                 return -ENOMEM;
2286         }
2287
2288         total = istl_size + intl_size + atl_size;
2289         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2290
2291         for (i = 0; i < 2; i++) {
2292                 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2293                 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2294                 isp1362_hcd->istl_queue[i].blk_size = 4;
2295                 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2296                 snprintf(isp1362_hcd->istl_queue[i].name,
2297                          sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2298                 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2299                      isp1362_hcd->istl_queue[i].name,
2300                      isp1362_hcd->istl_queue[i].buf_start,
2301                      isp1362_hcd->istl_queue[i].buf_size);
2302         }
2303         isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2304
2305         isp1362_hcd->intl_queue.buf_start = istl_size;
2306         isp1362_hcd->intl_queue.buf_size = intl_size;
2307         isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2308         isp1362_hcd->intl_queue.blk_size = intl_blksize;
2309         isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2310         isp1362_hcd->intl_queue.skip_map = ~0;
2311         INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2312
2313         isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2314                             isp1362_hcd->intl_queue.buf_size);
2315         isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2316                             isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2317         isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2318         isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2319                             1 << (ISP1362_INTL_BUFFERS - 1));
2320
2321         isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2322         isp1362_hcd->atl_queue.buf_size = atl_size;
2323         isp1362_hcd->atl_queue.buf_count = atl_buffers;
2324         isp1362_hcd->atl_queue.blk_size = atl_blksize;
2325         isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2326         isp1362_hcd->atl_queue.skip_map = ~0;
2327         INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2328
2329         isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2330                             isp1362_hcd->atl_queue.buf_size);
2331         isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2332                             isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2333         isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2334         isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2335                             1 << (atl_buffers - 1));
2336
2337         snprintf(isp1362_hcd->atl_queue.name,
2338                  sizeof(isp1362_hcd->atl_queue.name), "ATL");
2339         snprintf(isp1362_hcd->intl_queue.name,
2340                  sizeof(isp1362_hcd->intl_queue.name), "INTL");
2341         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2342              isp1362_hcd->intl_queue.name,
2343              isp1362_hcd->intl_queue.buf_start,
2344              ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2345              isp1362_hcd->intl_queue.buf_size);
2346         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2347              isp1362_hcd->atl_queue.name,
2348              isp1362_hcd->atl_queue.buf_start,
2349              atl_buffers, isp1362_hcd->atl_queue.blk_size,
2350              isp1362_hcd->atl_queue.buf_size);
2351
2352         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2353
2354         return 0;
2355 }
2356
2357 static int isp1362_hc_reset(struct usb_hcd *hcd)
2358 {
2359         int ret = 0;
2360         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2361         unsigned long t;
2362         unsigned long timeout = 100;
2363         unsigned long flags;
2364         int clkrdy = 0;
2365
2366         pr_info("%s:\n", __func__);
2367
2368         if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2369                 isp1362_hcd->board->reset(hcd->self.controller, 1);
2370                 msleep(20);
2371                 if (isp1362_hcd->board->clock)
2372                         isp1362_hcd->board->clock(hcd->self.controller, 1);
2373                 isp1362_hcd->board->reset(hcd->self.controller, 0);
2374         } else
2375                 isp1362_sw_reset(isp1362_hcd);
2376
2377         /* chip has been reset. First we need to see a clock */
2378         t = jiffies + msecs_to_jiffies(timeout);
2379         while (!clkrdy && time_before_eq(jiffies, t)) {
2380                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2381                 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2382                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2383                 if (!clkrdy)
2384                         msleep(4);
2385         }
2386
2387         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2388         isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2389         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2390         if (!clkrdy) {
2391                 pr_err("Clock not ready after %lums\n", timeout);
2392                 ret = -ENODEV;
2393         }
2394         return ret;
2395 }
2396
2397 static void isp1362_hc_stop(struct usb_hcd *hcd)
2398 {
2399         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2400         unsigned long flags;
2401         u32 tmp;
2402
2403         pr_info("%s:\n", __func__);
2404
2405         del_timer_sync(&hcd->rh_timer);
2406
2407         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2408
2409         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2410
2411         /* Switch off power for all ports */
2412         tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2413         tmp &= ~(RH_A_NPS | RH_A_PSM);
2414         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2415         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2416
2417         /* Reset the chip */
2418         if (isp1362_hcd->board && isp1362_hcd->board->reset)
2419                 isp1362_hcd->board->reset(hcd->self.controller, 1);
2420         else
2421                 isp1362_sw_reset(isp1362_hcd);
2422
2423         if (isp1362_hcd->board && isp1362_hcd->board->clock)
2424                 isp1362_hcd->board->clock(hcd->self.controller, 0);
2425
2426         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2427 }
2428
2429 #ifdef CHIP_BUFFER_TEST
2430 static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2431 {
2432         int ret = 0;
2433         u16 *ref;
2434         unsigned long flags;
2435
2436         ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2437         if (ref) {
2438                 int offset;
2439                 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2440
2441                 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2442                         ref[offset] = ~offset;
2443                         tst[offset] = offset;
2444                 }
2445
2446                 for (offset = 0; offset < 4; offset++) {
2447                         int j;
2448
2449                         for (j = 0; j < 8; j++) {
2450                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2451                                 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2452                                 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2453                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2454
2455                                 if (memcmp(ref, tst, j)) {
2456                                         ret = -ENODEV;
2457                                         pr_err("%s: memory check with %d byte offset %d failed\n",
2458                                             __func__, j, offset);
2459                                         dump_data((u8 *)ref + offset, j);
2460                                         dump_data((u8 *)tst + offset, j);
2461                                 }
2462                         }
2463                 }
2464
2465                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2466                 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2467                 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2468                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2469
2470                 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2471                         ret = -ENODEV;
2472                         pr_err("%s: memory check failed\n", __func__);
2473                         dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2474                 }
2475
2476                 for (offset = 0; offset < 256; offset++) {
2477                         int test_size = 0;
2478
2479                         yield();
2480
2481                         memset(tst, 0, ISP1362_BUF_SIZE);
2482                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2483                         isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2484                         isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2485                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2486                         if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2487                                    ISP1362_BUF_SIZE / 2)) {
2488                                 pr_err("%s: Failed to clear buffer\n", __func__);
2489                                 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2490                                 break;
2491                         }
2492                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2493                         isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2494                         isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2495                                              offset * 2 + PTD_HEADER_SIZE, test_size);
2496                         isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2497                                             PTD_HEADER_SIZE + test_size);
2498                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2499                         if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2500                                 dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2501                                 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2502                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2503                                 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2504                                                     PTD_HEADER_SIZE + test_size);
2505                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2506                                 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2507                                         ret = -ENODEV;
2508                                         pr_err("%s: memory check with offset %02x failed\n",
2509                                             __func__, offset);
2510                                         break;
2511                                 }
2512                                 pr_warning("%s: memory check with offset %02x ok after second read\n",
2513                                      __func__, offset);
2514                         }
2515                 }
2516                 kfree(ref);
2517         }
2518         return ret;
2519 }
2520 #endif
2521
2522 static int isp1362_hc_start(struct usb_hcd *hcd)
2523 {
2524         int ret;
2525         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2526         struct isp1362_platform_data *board = isp1362_hcd->board;
2527         u16 hwcfg;
2528         u16 chipid;
2529         unsigned long flags;
2530
2531         pr_info("%s:\n", __func__);
2532
2533         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2534         chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2535         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2536
2537         if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2538                 pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2539                 return -ENODEV;
2540         }
2541
2542 #ifdef CHIP_BUFFER_TEST
2543         ret = isp1362_chip_test(isp1362_hcd);
2544         if (ret)
2545                 return -ENODEV;
2546 #endif
2547         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2548         /* clear interrupt status and disable all interrupt sources */
2549         isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2550         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2551
        /* Hardware configuration: enable the interrupt output and set the
         * data bus width; the polarity and trigger options below come from
         * the board-specific platform data. */
2553         hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2554         if (board->sel15Kres)
2555                 hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2556                         ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2557         if (board->clknotstop)
2558                 hwcfg |= HCHWCFG_CLKNOTSTOP;
2559         if (board->oc_enable)
2560                 hwcfg |= HCHWCFG_ANALOG_OC;
2561         if (board->int_act_high)
2562                 hwcfg |= HCHWCFG_INT_POL;
2563         if (board->int_edge_triggered)
2564                 hwcfg |= HCHWCFG_INT_TRIGGER;
2565         if (board->dreq_act_high)
2566                 hwcfg |= HCHWCFG_DREQ_POL;
2567         if (board->dack_act_high)
2568                 hwcfg |= HCHWCFG_DACK_POL;
2569         isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2570         isp1362_show_reg(isp1362_hcd, HCHWCFG);
2571         isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2572         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2573
2574         ret = isp1362_mem_config(hcd);
2575         if (ret)
2576                 return ret;
2577
2578         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2579
        /* Root hub configuration: power switching and over-current reporting
         * per platform data; POTPGT (power-on to power-good time, in 2 ms
         * units) defaults to 25 (50 ms) when the board does not specify it. */
2581         isp1362_hcd->rhdesca = 0;
2582         if (board->no_power_switching)
2583                 isp1362_hcd->rhdesca |= RH_A_NPS;
2584         if (board->power_switching_mode)
2585                 isp1362_hcd->rhdesca |= RH_A_PSM;
2586         if (board->potpg)
2587                 isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2588         else
2589                 isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2590
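        /* Write HCRHDESCA with OCPM (per-port over-current reporting) first
         * cleared and then set, and read the register back so rhdesca
         * reflects the bits the chip actually implements. */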
2591         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2592         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2593         isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2594
2595         isp1362_hcd->rhdescb = RH_B_PPCM;
2596         isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2597         isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2598
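        /* Program the OHCI frame interval: FI is the frame length in 12 MHz
         * bit times and FSMP(FI) the largest full-speed data packet that fits
         * in a frame; HCLSTHRESH sets the low-speed transaction threshold. */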
2599         isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2600         isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2601         isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2602
2603         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2604
2605         isp1362_hcd->hc_control = OHCI_USB_OPER;
2606         hcd->state = HC_STATE_RUNNING;
2607
2608         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2609         /* Set up interrupts */
2610         isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2611         isp1362_hcd->intenb |= OHCI_INTR_RD;
2612         isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2613         isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2614         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2615
2616         /* Go operational */
2617         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2618         /* enable global power */
2619         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2620
2621         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2622
2623         return 0;
2624 }
2625
2626 /*-------------------------------------------------------------------------*/
2627
2628 static struct hc_driver isp1362_hc_driver = {
2629         .description =          hcd_name,
2630         .product_desc =         "ISP1362 Host Controller",
2631         .hcd_priv_size =        sizeof(struct isp1362_hcd),
2632
2633         .irq =                  isp1362_irq,
2634         .flags =                HCD_USB11 | HCD_MEMORY,
2635
2636         .reset =                isp1362_hc_reset,
2637         .start =                isp1362_hc_start,
2638         .stop =                 isp1362_hc_stop,
2639
2640         .urb_enqueue =          isp1362_urb_enqueue,
2641         .urb_dequeue =          isp1362_urb_dequeue,
2642         .endpoint_disable =     isp1362_endpoint_disable,
2643
2644         .get_frame_number =     isp1362_get_frame,
2645
2646         .hub_status_data =      isp1362_hub_status_data,
2647         .hub_control =          isp1362_hub_control,
2648         .bus_suspend =          isp1362_bus_suspend,
2649         .bus_resume =           isp1362_bus_resume,
2650 };
2651
2652 /*-------------------------------------------------------------------------*/
2653
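/* Byte length of a platform memory resource (same value as resource_size()). */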
2654 #define resource_len(r) (((r)->end - (r)->start) + 1)
2655
2656 static int __devexit isp1362_remove(struct platform_device *pdev)
2657 {
2658         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2659         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2660         struct resource *res;
2661
2662         remove_debug_file(isp1362_hcd);
2663         DBG(0, "%s: Removing HCD\n", __func__);
2664         usb_remove_hcd(hcd);
2665
2666         DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2667             isp1362_hcd->data_reg);
2668         iounmap(isp1362_hcd->data_reg);
2669
2670         DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2671             isp1362_hcd->addr_reg);
2672         iounmap(isp1362_hcd->addr_reg);
2673
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                DBG(0, "%s: release mem_region: %08lx\n", __func__,
                    (unsigned long)res->start);
                release_mem_region(res->start, resource_len(res));
        }
2678
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res) {
                DBG(0, "%s: release mem_region: %08lx\n", __func__,
                    (unsigned long)res->start);
                release_mem_region(res->start, resource_len(res));
        }
2683
2684         DBG(0, "%s: put_hcd\n", __func__);
2685         usb_put_hcd(hcd);
2686         DBG(0, "%s: Done\n", __func__);
2687
2688         return 0;
2689 }
2690
static int __devinit isp1362_probe(struct platform_device *pdev)
2692 {
2693         struct usb_hcd *hcd;
2694         struct isp1362_hcd *isp1362_hcd;
2695         struct resource *addr, *data;
2696         void __iomem *addr_reg;
2697         void __iomem *data_reg;
2698         int irq;
2699         int retval = 0;
2700         struct resource *irq_res;
2701         unsigned int irq_flags = 0;
2702
        /* Basic sanity checks first.  Board-specific init logic should
         * have initialized these three resources and probably board-
         * specific platform_data.  We don't probe for IRQs, and do only
         * minimal sanity checking.
         */
2708         if (pdev->num_resources < 3) {
2709                 retval = -ENODEV;
2710                 goto err1;
2711         }
2712
2713         data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2714         addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2715         irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2716         if (!addr || !data || !irq_res) {
2717                 retval = -ENODEV;
2718                 goto err1;
2719         }
2720         irq = irq_res->start;
2721
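        /* The ISP1362 is accessed by PIO through its address/data register
         * pair; this driver does not use DMA, so refuse to bind if the
         * platform device advertises a DMA mask. */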
2722         if (pdev->dev.dma_mask) {
2723                 DBG(1, "won't do DMA");
2724                 retval = -ENODEV;
2725                 goto err1;
2726         }
2727
2728         if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2729                 retval = -EBUSY;
2730                 goto err1;
2731         }
2732         addr_reg = ioremap(addr->start, resource_len(addr));
2733         if (addr_reg == NULL) {
2734                 retval = -ENOMEM;
2735                 goto err2;
2736         }
2737
2738         if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
2739                 retval = -EBUSY;
2740                 goto err3;
2741         }
2742         data_reg = ioremap(data->start, resource_len(data));
2743         if (data_reg == NULL) {
2744                 retval = -ENOMEM;
2745                 goto err4;
2746         }
2747
2748         /* allocate and initialize hcd */
2749         hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2750         if (!hcd) {
2751                 retval = -ENOMEM;
2752                 goto err5;
2753         }
2754         hcd->rsrc_start = data->start;
2755         isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2756         isp1362_hcd->data_reg = data_reg;
2757         isp1362_hcd->addr_reg = addr_reg;
2758
2759         isp1362_hcd->next_statechange = jiffies;
2760         spin_lock_init(&isp1362_hcd->lock);
2761         INIT_LIST_HEAD(&isp1362_hcd->async);
2762         INIT_LIST_HEAD(&isp1362_hcd->periodic);
2763         INIT_LIST_HEAD(&isp1362_hcd->isoc);
2764         INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2765         isp1362_hcd->board = pdev->dev.platform_data;
2766 #if USE_PLATFORM_DELAY
        if (!isp1362_hcd->board || !isp1362_hcd->board->delay) {
2768                 dev_err(hcd->self.controller, "No platform delay function given\n");
2769                 retval = -ENODEV;
2770                 goto err6;
2771         }
2772 #endif
2773
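        /* Translate the IRQ trigger type from the platform resource into
         * the corresponding IRQF_TRIGGER_* flags for usb_add_hcd(). */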
2774         if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2775                 irq_flags |= IRQF_TRIGGER_RISING;
2776         if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2777                 irq_flags |= IRQF_TRIGGER_FALLING;
2778         if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2779                 irq_flags |= IRQF_TRIGGER_HIGH;
2780         if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2781                 irq_flags |= IRQF_TRIGGER_LOW;
2782
2783         retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
2784         if (retval != 0)
2785                 goto err6;
2786         pr_info("%s, irq %d\n", hcd->product_desc, irq);
2787
2788         create_debug_file(isp1362_hcd);
2789
2790         return 0;
2791
2792  err6:
2793         DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
2794         usb_put_hcd(hcd);
2795  err5:
2796         DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
2797         iounmap(data_reg);
2798  err4:
        DBG(0, "%s: Releasing mem region %08lx\n", __func__, (unsigned long)data->start);
2800         release_mem_region(data->start, resource_len(data));
2801  err3:
2802         DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2803         iounmap(addr_reg);
2804  err2:
        DBG(0, "%s: Releasing mem region %08lx\n", __func__, (unsigned long)addr->start);
2806         release_mem_region(addr->start, resource_len(addr));
2807  err1:
2808         pr_err("%s: init error, %d\n", __func__, retval);
2809
2810         return retval;
2811 }
2812
2813 #ifdef  CONFIG_PM
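/*
 * Legacy platform PM callbacks: a PM_EVENT_FREEZE suspend goes through the
 * USB core's bus_suspend path, any other suspend simply cuts global port
 * power via HCRHSTATUS; resume reverses whichever of the two was done.
 */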
2814 static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2815 {
2816         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2817         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2818         unsigned long flags;
2819         int retval = 0;
2820
2821         DBG(0, "%s: Suspending device\n", __func__);
2822
2823         if (state.event == PM_EVENT_FREEZE) {
2824                 DBG(0, "%s: Suspending root hub\n", __func__);
2825                 retval = isp1362_bus_suspend(hcd);
2826         } else {
2827                 DBG(0, "%s: Suspending RH ports\n", __func__);
2828                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2829                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2830                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2831         }
2832         if (retval == 0)
2833                 pdev->dev.power.power_state = state;
2834         return retval;
2835 }
2836
2837 static int isp1362_resume(struct platform_device *pdev)
2838 {
2839         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2840         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2841         unsigned long flags;
2842
2843         DBG(0, "%s: Resuming\n", __func__);
2844
2845         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2846                 DBG(0, "%s: Resume RH ports\n", __func__);
2847                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2848                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2849                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2850                 return 0;
2851         }
2852
2853         pdev->dev.power.power_state = PMSG_ON;
2854
2855         return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2856 }
2857 #else
2858 #define isp1362_suspend NULL
2859 #define isp1362_resume  NULL
2860 #endif
2861
2862 static struct platform_driver isp1362_driver = {
2863         .probe = isp1362_probe,
2864         .remove = __devexit_p(isp1362_remove),
2865
2866         .suspend = isp1362_suspend,
2867         .resume = isp1362_resume,
2868         .driver = {
                .name = hcd_name,
2870                 .owner = THIS_MODULE,
2871         },
2872 };
2873
2874 /*-------------------------------------------------------------------------*/
2875
2876 static int __init isp1362_init(void)
2877 {
2878         if (usb_disabled())
2879                 return -ENODEV;
2880         pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2881         return platform_driver_register(&isp1362_driver);
2882 }
2883 module_init(isp1362_init);
2884
2885 static void __exit isp1362_cleanup(void)
2886 {
2887         platform_driver_unregister(&isp1362_driver);
2888 }
2889 module_exit(isp1362_cleanup);