1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
11 #include <linux/usb.h>
14 /*-------------------------------------------------------------------------*/
16 static int override_alt = -1;
17 module_param_named(alt, override_alt, int, 0644);
18 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
20 /*-------------------------------------------------------------------------*/
22 /* FIXME make these public somewhere; usbdevfs.h? */
23 struct usbtest_param {
25 unsigned test_num; /* 0..(TEST_CASES-1) */
32 struct timeval duration;
34 #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
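/* User space fills in test_num, iterations, length, vary and sglen, and the
 * elapsed time comes back through 'duration'.  A hedged sketch of how such a
 * request is wrapped in a usbfs USBDEVFS_IOCTL call appears just before
 * usbtest_ioctl() below.
 */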
36 /*-------------------------------------------------------------------------*/
38 #define GENERIC /* let probe() bind using module params */
40 /* Some devices that can be used for testing will have "real" drivers.
41 * Entries for those need to be enabled here by hand, after disabling
44 //#define IBOT2 /* grab iBOT2 webcams */
45 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
47 /*-------------------------------------------------------------------------*/
51 u8 ep_in; /* bulk/intr source */
52 u8 ep_out; /* bulk/intr sink */
55 unsigned iso:1; /* try iso in/out */
59 /* this is accessed only through usbfs ioctl calls.
60 * one ioctl to issue a test ... one lock per device.
61 * tests create other threads if they need them.
62 * urbs and buffers are allocated dynamically,
63 * and data generated deterministically.
66 struct usb_interface *intf;
67 struct usbtest_info *info;
72 struct usb_endpoint_descriptor *iso_in, *iso_out;
79 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
81 return interface_to_usbdev(test->intf);
84 /* set up all urbs so they can be used with either bulk or interrupt */
85 #define INTERRUPT_RATE 1 /* msec/transfer */
87 #define ERROR(tdev, fmt, args...) \
88 dev_err(&(tdev)->intf->dev , fmt , ## args)
89 #define WARNING(tdev, fmt, args...) \
90 dev_warn(&(tdev)->intf->dev , fmt , ## args)
92 #define GUARD_BYTE 0xA5
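/* Unaligned-buffer tests advance the transfer pointer by a small offset and
 * fill the bytes skipped over with GUARD_BYTE; check_guard_bytes() verifies
 * later that nothing scribbled on them.  IN buffers are also pre-filled with
 * GUARD_BYTE so stale data can't masquerade as a successful read.
 */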
94 /*-------------------------------------------------------------------------*/
97 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
100 struct usb_host_interface *alt;
101 struct usb_host_endpoint *in, *out;
102 struct usb_host_endpoint *iso_in, *iso_out;
103 struct usb_device *udev;
105 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
109 iso_in = iso_out = NULL;
110 alt = intf->altsetting + tmp;
112 if (override_alt >= 0 &&
113 override_alt != alt->desc.bAlternateSetting)
116 /* take the first altsetting with in-bulk + out-bulk;
117 * ignore other endpoints and altsettings.
119 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
120 struct usb_host_endpoint *e;
122 e = alt->endpoint + ep;
123 switch (usb_endpoint_type(&e->desc)) {
124 case USB_ENDPOINT_XFER_BULK:
126 case USB_ENDPOINT_XFER_ISOC:
133 if (usb_endpoint_dir_in(&e->desc)) {
142 if (usb_endpoint_dir_in(&e->desc)) {
150 if ((in && out) || iso_in || iso_out)
156 udev = testdev_to_usbdev(dev);
157 dev->info->alt = alt->desc.bAlternateSetting;
158 if (alt->desc.bAlternateSetting != 0) {
159 tmp = usb_set_interface(udev,
160 alt->desc.bInterfaceNumber,
161 alt->desc.bAlternateSetting);
167 dev->in_pipe = usb_rcvbulkpipe(udev,
168 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
169 dev->out_pipe = usb_sndbulkpipe(udev,
170 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
173 dev->iso_in = &iso_in->desc;
174 dev->in_iso_pipe = usb_rcvisocpipe(udev,
175 iso_in->desc.bEndpointAddress
176 & USB_ENDPOINT_NUMBER_MASK);
180 dev->iso_out = &iso_out->desc;
181 dev->out_iso_pipe = usb_sndisocpipe(udev,
182 iso_out->desc.bEndpointAddress
183 & USB_ENDPOINT_NUMBER_MASK);
188 /*-------------------------------------------------------------------------*/
190 /* Support for testing basic non-queued I/O streams.
192 * These just package urbs as requests that can be easily canceled.
193 * Each urb's data buffer is dynamically allocated; callers can fill
194 * them with non-zero test data (or test for it) when appropriate.
197 static void simple_callback(struct urb *urb)
199 complete(urb->context);
202 static struct urb *usbtest_alloc_urb(
203 struct usb_device *udev,
206 unsigned transfer_flags,
211 urb = usb_alloc_urb(0, GFP_KERNEL);
214 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
215 urb->interval = (udev->speed == USB_SPEED_HIGH)
216 ? (INTERRUPT_RATE << 3)
218 urb->transfer_flags = transfer_flags;
219 if (usb_pipein(pipe))
220 urb->transfer_flags |= URB_SHORT_NOT_OK;
222 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
223 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
224 GFP_KERNEL, &urb->transfer_dma);
226 urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
228 if (!urb->transfer_buffer) {
233 /* To test unaligned transfers, add an offset and fill the
234 unused memory with a guard value */
236 memset(urb->transfer_buffer, GUARD_BYTE, offset);
237 urb->transfer_buffer += offset;
238 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
239 urb->transfer_dma += offset;
242 /* For inbound transfers use the guard byte so the test fails
243 if the data is not copied correctly */
244 memset(urb->transfer_buffer,
245 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
250 static struct urb *simple_alloc_urb(
251 struct usb_device *udev,
255 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
258 static unsigned pattern;
259 static unsigned mod_pattern;
260 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
261 MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
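/* pattern 0 transfers all zeroes; pattern 1 transfers the repeating byte
 * sequence 0, 1, ..., 62, 0, 1, ... ("mod63"), restarting at 0 for each
 * transfer, so it only stays in sync when host and gadget agree on transfer
 * boundaries.  Any other value makes the read-back check fail.
 */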
263 static inline void simple_fill_buf(struct urb *urb)
266 u8 *buf = urb->transfer_buffer;
267 unsigned len = urb->transfer_buffer_length;
276 for (i = 0; i < len; i++)
277 *buf++ = (u8) (i % 63);
282 static inline unsigned long buffer_offset(void *buf)
284 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
287 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
289 u8 *buf = urb->transfer_buffer;
290 u8 *guard = buf - buffer_offset(buf);
293 for (i = 0; guard < buf; i++, guard++) {
294 if (*guard != GUARD_BYTE) {
295 ERROR(tdev, "guard byte[%d] %d (not %d)\n",
296 i, *guard, GUARD_BYTE);
303 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
307 u8 *buf = urb->transfer_buffer;
308 unsigned len = urb->actual_length;
310 int ret = check_guard_bytes(tdev, urb);
314 for (i = 0; i < len; i++, buf++) {
316 /* all-zeroes has no synchronization issues */
320 /* mod63 stays in sync with short-terminated transfers,
321 * or otherwise when host and gadget agree on how large
322 * each usb transfer request should be. resync is done
323 * with set_interface or set_config.
328 /* always fail unsupported patterns */
333 if (*buf == expected)
335 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
341 static void simple_free_urb(struct urb *urb)
343 unsigned long offset = buffer_offset(urb->transfer_buffer);
345 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
348 urb->transfer_buffer_length + offset,
349 urb->transfer_buffer - offset,
350 urb->transfer_dma - offset);
352 kfree(urb->transfer_buffer - offset);
356 static int simple_io(
357 struct usbtest_dev *tdev,
365 struct usb_device *udev = urb->dev;
366 int max = urb->transfer_buffer_length;
367 struct completion completion;
370 urb->context = &completion;
371 while (retval == 0 && iterations-- > 0) {
372 init_completion(&completion);
373 if (usb_pipeout(urb->pipe)) {
374 simple_fill_buf(urb);
375 urb->transfer_flags |= URB_ZERO_PACKET;
377 retval = usb_submit_urb(urb, GFP_KERNEL);
381 /* NOTE: no timeouts; can't be broken out of by interrupt */
382 wait_for_completion(&completion);
383 retval = urb->status;
385 if (retval == 0 && usb_pipein(urb->pipe))
386 retval = simple_check_buf(tdev, urb);
389 int len = urb->transfer_buffer_length;
394 len = (vary < max) ? vary : max;
395 urb->transfer_buffer_length = len;
398 /* FIXME if endpoint halted, clear halt (and log) */
400 urb->transfer_buffer_length = max;
402 if (expected != retval)
404 "%s failed, iterations left %d, status %d (not %d)\n",
405 label, iterations, retval, expected);
410 /*-------------------------------------------------------------------------*/
412 /* We use scatterlist primitives to test queued I/O.
413 * Yes, this also tests the scatterlist primitives.
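/* For the queued bulk tests, param->sglen is the number of scatterlist
 * entries, each up to param->length bytes; perform_sglist() submits the
 * whole list as one usb_sg_request per iteration and waits for it to
 * finish before starting the next.
 */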
416 static void free_sglist(struct scatterlist *sg, int nents)
422 for (i = 0; i < nents; i++) {
423 if (!sg_page(&sg[i]))
425 kfree(sg_virt(&sg[i]));
430 static struct scatterlist *
431 alloc_sglist(int nents, int max, int vary)
433 struct scatterlist *sg;
440 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
443 sg_init_table(sg, nents);
445 for (i = 0; i < nents; i++) {
449 buf = kzalloc(size, GFP_KERNEL);
455 /* kmalloc pages are always physically contiguous! */
456 sg_set_buf(&sg[i], buf, size);
463 for (j = 0; j < size; j++)
464 *buf++ = (u8) (j % 63);
472 size = (vary < max) ? vary : max;
479 static int perform_sglist(
480 struct usbtest_dev *tdev,
483 struct usb_sg_request *req,
484 struct scatterlist *sg,
488 struct usb_device *udev = testdev_to_usbdev(tdev);
491 while (retval == 0 && iterations-- > 0) {
492 retval = usb_sg_init(req, udev, pipe,
493 (udev->speed == USB_SPEED_HIGH)
494 ? (INTERRUPT_RATE << 3)
496 sg, nents, 0, GFP_KERNEL);
501 retval = req->status;
503 /* FIXME check resulting data pattern */
505 /* FIXME if endpoint halted, clear halt (and log) */
508 /* FIXME for unlink or fault handling tests, don't report
509 * failure if retval is as we expected ...
512 ERROR(tdev, "perform_sglist failed, "
513 "iterations left %d, status %d\n",
519 /*-------------------------------------------------------------------------*/
521 /* unqueued control message testing
523 * there's a nice set of device functional requirements in chapter 9 of the
524 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
525 * special test firmware.
527 * we know the device is configured (or suspended) by the time it's visible
528 * through usbfs. we can't change that, so we won't test enumeration (which
529 * worked 'well enough' to get here, this time), power management (ditto),
530 * or remote wakeup (which needs human interaction).
533 static unsigned realworld = 1;
534 module_param(realworld, uint, 0);
535 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
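/* e.g. "modprobe usbtest realworld=0" demands strict chapter 9 compliance
 * instead of tolerating requests that many shipping devices get wrong.
 */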
537 static int get_altsetting(struct usbtest_dev *dev)
539 struct usb_interface *iface = dev->intf;
540 struct usb_device *udev = interface_to_usbdev(iface);
543 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
544 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
545 0, iface->altsetting[0].desc.bInterfaceNumber,
546 dev->buf, 1, USB_CTRL_GET_TIMEOUT);
558 static int set_altsetting(struct usbtest_dev *dev, int alternate)
560 struct usb_interface *iface = dev->intf;
561 struct usb_device *udev;
563 if (alternate < 0 || alternate >= 256)
566 udev = interface_to_usbdev(iface);
567 return usb_set_interface(udev,
568 iface->altsetting[0].desc.bInterfaceNumber,
572 static int is_good_config(struct usbtest_dev *tdev, int len)
574 struct usb_config_descriptor *config;
576 if (len < sizeof(*config))
578 config = (struct usb_config_descriptor *) tdev->buf;
580 switch (config->bDescriptorType) {
582 case USB_DT_OTHER_SPEED_CONFIG:
583 if (config->bLength != 9) {
584 ERROR(tdev, "bogus config descriptor length\n");
587 /* this bit 'must be 1' but often isn't */
588 if (!realworld && !(config->bmAttributes & 0x80)) {
589 ERROR(tdev, "high bit of config attributes not set\n");
592 if (config->bmAttributes & 0x1f) { /* reserved == 0 */
593 ERROR(tdev, "reserved config bits set\n");
601 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
603 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
605 ERROR(tdev, "bogus config descriptor read size\n");
609 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
611 struct usb_ext_cap_descriptor *ext;
614 ext = (struct usb_ext_cap_descriptor *) buf;
616 if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
617 ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
621 attr = le32_to_cpu(ext->bmAttributes);
622 /* bits[1:4] are used and the others are reserved */
623 if (attr & ~0x1e) { /* reserved == 0 */
624 ERROR(tdev, "reserved bits set\n");
631 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
633 struct usb_ss_cap_descriptor *ss;
635 ss = (struct usb_ss_cap_descriptor *) buf;
637 if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
638 ERROR(tdev, "bogus superspeed device capability descriptor length\n");
643 * only bit[1] of bmAttributes is used for LTM and others are
646 if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
647 ERROR(tdev, "reserved bits set in bmAttributes\n");
651 /* bits[0:3] of wSpeedSupported are used and the others are reserved */
652 if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
653 ERROR(tdev, "reserved bits set in wSpeedSupported\n");
660 /* sanity test for standard requests working with usb_control_msg() and some
661 * of the utility functions which use it.
663 * this doesn't test how endpoint halts behave or data toggles get set, since
664 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
665 * halt or toggle). toggle testing is impractical without support from hcds.
667 * this avoids failing devices linux would normally work with, by not testing
668 * config/altsetting operations for devices that only support their defaults.
669 * such devices rarely support those needless operations.
671 * NOTE that since this is a sanity test, it's not examining boundary cases
672 * to see if usbcore, hcd, and device all behave right. such testing would
673 * involve varied read sizes and other operation sequences.
675 static int ch9_postconfig(struct usbtest_dev *dev)
677 struct usb_interface *iface = dev->intf;
678 struct usb_device *udev = interface_to_usbdev(iface);
681 /* [9.2.3] if there's more than one altsetting, we need to be able to
682 * set and get each one. mostly trusts the descriptors from usbcore.
684 for (i = 0; i < iface->num_altsetting; i++) {
686 /* 9.2.3 constrains the range here */
687 alt = iface->altsetting[i].desc.bAlternateSetting;
688 if (alt < 0 || alt >= iface->num_altsetting) {
690 "invalid alt [%d].bAltSetting = %d\n",
694 /* [real world] get/set unimplemented if there's only one */
695 if (realworld && iface->num_altsetting == 1)
698 /* [9.4.10] set_interface */
699 retval = set_altsetting(dev, alt);
701 dev_err(&iface->dev, "can't set_interface = %d, %d\n",
706 /* [9.4.4] get_interface always works */
707 retval = get_altsetting(dev);
709 dev_err(&iface->dev, "get alt should be %d, was %d\n",
711 return (retval < 0) ? retval : -EDOM;
716 /* [real world] get_config unimplemented if there's only one */
717 if (!realworld || udev->descriptor.bNumConfigurations != 1) {
718 int expected = udev->actconfig->desc.bConfigurationValue;
720 /* [9.4.2] get_configuration always works
721 * ... although some cheap devices (like one TI Hub I've got)
722 * won't return config descriptors except before set_config.
724 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
725 USB_REQ_GET_CONFIGURATION,
726 USB_DIR_IN | USB_RECIP_DEVICE,
727 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
728 if (retval != 1 || dev->buf[0] != expected) {
729 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
730 retval, dev->buf[0], expected);
731 return (retval < 0) ? retval : -EDOM;
735 /* there's always [9.4.3] a device descriptor [9.6.1] */
736 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
737 dev->buf, sizeof(udev->descriptor));
738 if (retval != sizeof(udev->descriptor)) {
739 dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
740 return (retval < 0) ? retval : -EDOM;
744 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
747 if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0300) {
748 struct usb_bos_descriptor *bos = NULL;
749 struct usb_dev_cap_header *header = NULL;
750 unsigned total, num, length;
753 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
754 sizeof(*udev->bos->desc));
755 if (retval != sizeof(*udev->bos->desc)) {
756 dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
757 return (retval < 0) ? retval : -EDOM;
760 bos = (struct usb_bos_descriptor *)dev->buf;
761 total = le16_to_cpu(bos->wTotalLength);
762 num = bos->bNumDeviceCaps;
764 if (total > TBUF_SIZE)
768 * get generic device-level capability descriptors [9.6.2]
771 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
773 if (retval != total) {
774 dev_err(&iface->dev, "bos descriptor set --> %d\n",
776 return (retval < 0) ? retval : -EDOM;
779 length = sizeof(*udev->bos->desc);
781 for (i = 0; i < num; i++) {
783 if (buf + sizeof(struct usb_dev_cap_header) >
787 header = (struct usb_dev_cap_header *)buf;
788 length = header->bLength;
790 if (header->bDescriptorType !=
791 USB_DT_DEVICE_CAPABILITY) {
792 dev_warn(&udev->dev, "not device capability descriptor, skip\n");
796 switch (header->bDevCapabilityType) {
797 case USB_CAP_TYPE_EXT:
798 if (buf + USB_DT_USB_EXT_CAP_SIZE >
800 !is_good_ext(dev, buf)) {
801 dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
805 case USB_SS_CAP_TYPE:
806 if (buf + USB_DT_USB_SS_CAP_SIZE >
808 !is_good_ss_cap(dev, buf)) {
809 dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
819 /* there's always [9.4.3] at least one config descriptor [9.6.3] */
820 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
821 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
822 dev->buf, TBUF_SIZE);
823 if (!is_good_config(dev, retval)) {
825 "config [%d] descriptor --> %d\n",
827 return (retval < 0) ? retval : -EDOM;
830 /* FIXME cross-checking udev->config[i] to make sure usbcore
831 * parsed it right (etc) would be good testing paranoia
835 /* and sometimes [9.2.6.6] speed dependent descriptors */
836 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
837 struct usb_qualifier_descriptor *d = NULL;
839 /* device qualifier [9.6.2] */
840 retval = usb_get_descriptor(udev,
841 USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
842 sizeof(struct usb_qualifier_descriptor));
843 if (retval == -EPIPE) {
844 if (udev->speed == USB_SPEED_HIGH) {
846 "hs dev qualifier --> %d\n",
848 return (retval < 0) ? retval : -EDOM;
850 /* usb2.0 but not high-speed capable; fine */
851 } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
852 dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
853 return (retval < 0) ? retval : -EDOM;
855 d = (struct usb_qualifier_descriptor *) dev->buf;
857 /* might not have [9.6.2] any other-speed configs [9.6.4] */
859 unsigned max = d->bNumConfigurations;
860 for (i = 0; i < max; i++) {
861 retval = usb_get_descriptor(udev,
862 USB_DT_OTHER_SPEED_CONFIG, i,
863 dev->buf, TBUF_SIZE);
864 if (!is_good_config(dev, retval)) {
866 "other speed config --> %d\n",
868 return (retval < 0) ? retval : -EDOM;
873 /* FIXME fetch strings from at least the device descriptor */
875 /* [9.4.5] get_status always works */
876 retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
878 dev_err(&iface->dev, "get dev status --> %d\n", retval);
882 /* FIXME configuration.bmAttributes says if we could try to set/clear
883 * the device's remote wakeup feature ... if we can, test that here
886 retval = usb_get_status(udev, USB_RECIP_INTERFACE,
887 iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
889 dev_err(&iface->dev, "get interface status --> %d\n", retval);
892 /* FIXME get status for each endpoint in the interface */
897 /*-------------------------------------------------------------------------*/
899 /* use ch9 requests to test whether:
900 * (a) queues work for control, keeping N subtests queued and
901 * active (auto-resubmit) for M loops through the queue.
902 * (b) protocol stalls (control-only) will autorecover.
903 * it's not like bulk/intr; no halt clearing.
904 * (c) short control reads are reported and handled.
905 * (d) queues are always processed in-order
910 struct usbtest_dev *dev;
911 struct completion complete;
916 struct usbtest_param *param;
920 #define NUM_SUBCASES 15 /* how many test subcases here? */
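/* Each queued urb takes subcase (i % NUM_SUBCASES); sglen sets the queue
 * depth, and sglen * iterations is the total number of control requests.
 * Completions must arrive in FIFO order, which ctrl_complete() checks by
 * tracking the last subcase number seen.
 */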
923 struct usb_ctrlrequest setup;
928 static void ctrl_complete(struct urb *urb)
930 struct ctrl_ctx *ctx = urb->context;
931 struct usb_ctrlrequest *reqp;
932 struct subcase *subcase;
933 int status = urb->status;
935 reqp = (struct usb_ctrlrequest *)urb->setup_packet;
936 subcase = container_of(reqp, struct subcase, setup);
938 spin_lock(&ctx->lock);
942 /* queue must transfer and complete in fifo order, unless
943 * usb_unlink_urb() is used to unlink something not at the
944 * physical queue head (not tested).
946 if (subcase->number > 0) {
947 if ((subcase->number - ctx->last) != 1) {
949 "subcase %d completed out of order, last %d\n",
950 subcase->number, ctx->last);
952 ctx->last = subcase->number;
956 ctx->last = subcase->number;
958 /* succeed or fault in only one way? */
959 if (status == subcase->expected)
962 /* async unlink for cleanup? */
963 else if (status != -ECONNRESET) {
965 /* some faults are allowed, not required */
966 if (subcase->expected > 0 && (
967 ((status == -subcase->expected /* happened */
968 || status == 0)))) /* didn't */
970 /* sometimes more than one fault is allowed */
971 else if (subcase->number == 12 && status == -EPIPE)
974 ERROR(ctx->dev, "subtest %d error, status %d\n",
975 subcase->number, status);
978 /* unexpected status codes mean errors; ideally, in hardware */
981 if (ctx->status == 0) {
984 ctx->status = status;
985 ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
986 "%d left, subcase %d, len %d/%d\n",
987 reqp->bRequestType, reqp->bRequest,
988 status, ctx->count, subcase->number,
990 urb->transfer_buffer_length);
992 /* FIXME this "unlink everything" exit route should
993 * be a separate test case.
996 /* unlink whatever's still pending */
997 for (i = 1; i < ctx->param->sglen; i++) {
998 struct urb *u = ctx->urb[
999 (i + subcase->number)
1000 % ctx->param->sglen];
1002 if (u == urb || !u->dev)
1004 spin_unlock(&ctx->lock);
1005 status = usb_unlink_urb(u);
1006 spin_lock(&ctx->lock);
1013 ERROR(ctx->dev, "urb unlink --> %d\n",
1017 status = ctx->status;
1021 /* resubmit if we need to, else mark this as done */
1022 if ((status == 0) && (ctx->pending < ctx->count)) {
1023 status = usb_submit_urb(urb, GFP_ATOMIC);
1026 "can't resubmit ctrl %02x.%02x, err %d\n",
1027 reqp->bRequestType, reqp->bRequest, status);
1034 /* signal completion when nothing's queued */
1035 if (ctx->pending == 0)
1036 complete(&ctx->complete);
1037 spin_unlock(&ctx->lock);
1041 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
1043 struct usb_device *udev = testdev_to_usbdev(dev);
1045 struct ctrl_ctx context;
1048 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1051 spin_lock_init(&context.lock);
1053 init_completion(&context.complete);
1054 context.count = param->sglen * param->iterations;
1055 context.pending = 0;
1056 context.status = -ENOMEM;
1057 context.param = param;
1060 /* allocate and init the urbs we'll queue.
1061 * as with bulk/intr sglists, sglen is the queue depth; it also
1062 * controls which subtests run (more tests than sglen) or rerun.
1064 urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1067 for (i = 0; i < param->sglen; i++) {
1068 int pipe = usb_rcvctrlpipe(udev, 0);
1071 struct usb_ctrlrequest req;
1072 struct subcase *reqp;
1074 /* sign of this variable means:
1075 * -: tested code must return this (negative) error code
1076 * +: tested code may return this (negative too) error code
1080 /* requests here are mostly expected to succeed on any
1081 * device, but some are chosen to trigger protocol stalls
1084 memset(&req, 0, sizeof(req));
1085 req.bRequest = USB_REQ_GET_DESCRIPTOR;
1086 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1088 switch (i % NUM_SUBCASES) {
1089 case 0: /* get device descriptor */
1090 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1091 len = sizeof(struct usb_device_descriptor);
1093 case 1: /* get first config descriptor (only) */
1094 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1095 len = sizeof(struct usb_config_descriptor);
1097 case 2: /* get altsetting (OFTEN STALLS) */
1098 req.bRequest = USB_REQ_GET_INTERFACE;
1099 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1100 /* index = 0 means first interface */
1104 case 3: /* get interface status */
1105 req.bRequest = USB_REQ_GET_STATUS;
1106 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1110 case 4: /* get device status */
1111 req.bRequest = USB_REQ_GET_STATUS;
1112 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1115 case 5: /* get device qualifier (MAY STALL) */
1116 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
1117 len = sizeof(struct usb_qualifier_descriptor);
1118 if (udev->speed != USB_SPEED_HIGH)
1121 case 6: /* get first config descriptor, plus interface */
1122 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1123 len = sizeof(struct usb_config_descriptor);
1124 len += sizeof(struct usb_interface_descriptor);
1126 case 7: /* get interface descriptor (ALWAYS STALLS) */
1127 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1128 /* interface == 0 */
1129 len = sizeof(struct usb_interface_descriptor);
1132 /* NOTE: two consecutive stalls in the queue here.
1133 * that tests fault recovery a bit more aggressively. */
1134 case 8: /* clear endpoint halt (MAY STALL) */
1135 req.bRequest = USB_REQ_CLEAR_FEATURE;
1136 req.bRequestType = USB_RECIP_ENDPOINT;
1137 /* wValue 0 == ep halt */
1138 /* wIndex 0 == ep0 (shouldn't halt!) */
1140 pipe = usb_sndctrlpipe(udev, 0);
1143 case 9: /* get endpoint status */
1144 req.bRequest = USB_REQ_GET_STATUS;
1145 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1149 case 10: /* trigger short read (EREMOTEIO) */
1150 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1152 expected = -EREMOTEIO;
1154 /* NOTE: two consecutive _different_ faults in the queue. */
1155 case 11: /* get endpoint descriptor (ALWAYS STALLS) */
1156 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1158 len = sizeof(struct usb_interface_descriptor);
1161 /* NOTE: sometimes even a third fault in the queue! */
1162 case 12: /* get string 0 descriptor (MAY STALL) */
1163 req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1164 /* string == 0, for language IDs */
1165 len = sizeof(struct usb_interface_descriptor);
1166 /* may succeed when > 4 languages */
1167 expected = EREMOTEIO; /* or EPIPE, if no strings */
1169 case 13: /* short read, resembling case 10 */
1170 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1171 /* last data packet "should" be DATA1, not DATA0 */
1172 if (udev->speed == USB_SPEED_SUPER)
1175 len = 1024 - udev->descriptor.bMaxPacketSize0;
1176 expected = -EREMOTEIO;
1178 case 14: /* short read; try to fill the last packet */
1179 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1180 /* device descriptor size == 18 bytes */
1181 len = udev->descriptor.bMaxPacketSize0;
1182 if (udev->speed == USB_SPEED_SUPER)
1192 expected = -EREMOTEIO;
1195 ERROR(dev, "bogus number of ctrl queue testcases!\n");
1196 context.status = -EINVAL;
1199 req.wLength = cpu_to_le16(len);
1200 urb[i] = u = simple_alloc_urb(udev, pipe, len);
1204 reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1208 reqp->number = i % NUM_SUBCASES;
1209 reqp->expected = expected;
1210 u->setup_packet = (char *) &reqp->setup;
1212 u->context = &context;
1213 u->complete = ctrl_complete;
1216 /* queue the urbs */
1218 spin_lock_irq(&context.lock);
1219 for (i = 0; i < param->sglen; i++) {
1220 context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1221 if (context.status != 0) {
1222 ERROR(dev, "can't submit urb[%d], status %d\n",
1224 context.count = context.pending;
1229 spin_unlock_irq(&context.lock);
1231 /* FIXME set timer and time out; provide a disconnect hook */
1233 /* wait for the last one to complete */
1234 if (context.pending > 0)
1235 wait_for_completion(&context.complete);
1238 for (i = 0; i < param->sglen; i++) {
1242 kfree(urb[i]->setup_packet);
1243 simple_free_urb(urb[i]);
1246 return context.status;
1251 /*-------------------------------------------------------------------------*/
1253 static void unlink1_callback(struct urb *urb)
1255 int status = urb->status;
1257 /* we "know" -EPIPE (stall) never happens */
1259 status = usb_submit_urb(urb, GFP_ATOMIC);
1261 urb->status = status;
1262 complete(urb->context);
1266 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1269 struct completion completion;
1272 init_completion(&completion);
1273 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1276 urb->context = &completion;
1277 urb->complete = unlink1_callback;
1279 /* keep the endpoint busy. there are lots of hc/hcd-internal
1280 * states, and testing should get to all of them over time.
1282 * FIXME want additional tests for when endpoint is STALLing
1283 * due to errors, or is just NAKing requests.
1285 retval = usb_submit_urb(urb, GFP_KERNEL);
1287 dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1291 /* unlinking that should always work. variable delay tests more
1292 * hcd states and code paths, even with little other system load.
1294 msleep(jiffies % (2 * INTERRUPT_RATE));
1296 while (!completion_done(&completion)) {
1297 retval = usb_unlink_urb(urb);
1302 /* we can't unlink urbs while they're completing
1303 * or if they've completed, and we haven't
1304 * resubmitted. "normal" drivers would prevent
1305 * resubmission, but since we're testing unlink
1308 ERROR(dev, "unlink retry\n");
1315 dev_err(&dev->intf->dev,
1316 "unlink fail %d\n", retval);
1325 wait_for_completion(&completion);
1326 retval = urb->status;
1327 simple_free_urb(urb);
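/* Async unlinks are expected to complete with -ECONNRESET, synchronous ones
 * with -ENOENT or -EPERM; the distinct error offsets applied below show
 * which check produced the unexpected status.
 */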
1330 return (retval == -ECONNRESET) ? 0 : retval - 1000;
1332 return (retval == -ENOENT || retval == -EPERM) ?
1336 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1340 /* test sync and async paths */
1341 retval = unlink1(dev, pipe, len, 1);
1343 retval = unlink1(dev, pipe, len, 0);
1347 /*-------------------------------------------------------------------------*/
1350 struct completion complete;
1357 static void unlink_queued_callback(struct urb *urb)
1359 int status = urb->status;
1360 struct queued_ctx *ctx = urb->context;
1364 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1365 if (status == -ECONNRESET)
1367 /* What error should we report if the URB completed normally? */
1370 ctx->status = status;
1373 if (atomic_dec_and_test(&ctx->pending))
1374 complete(&ctx->complete);
1377 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1380 struct queued_ctx ctx;
1381 struct usb_device *udev = testdev_to_usbdev(dev);
1385 int retval = -ENOMEM;
1387 init_completion(&ctx.complete);
1388 atomic_set(&ctx.pending, 1); /* One more than the actual value */
1392 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1395 memset(buf, 0, size);
1397 /* Allocate and init the urbs we'll queue */
1398 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1401 for (i = 0; i < num; i++) {
1402 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1405 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1406 unlink_queued_callback, &ctx);
1407 ctx.urbs[i]->transfer_dma = buf_dma;
1408 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1411 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1412 for (i = 0; i < num; i++) {
1413 atomic_inc(&ctx.pending);
1414 retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1416 dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1418 atomic_dec(&ctx.pending);
1419 ctx.status = retval;
1424 usb_unlink_urb(ctx.urbs[num - 4]);
1425 usb_unlink_urb(ctx.urbs[num - 2]);
1428 usb_unlink_urb(ctx.urbs[i]);
1431 if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
1432 complete(&ctx.complete);
1433 wait_for_completion(&ctx.complete);
1434 retval = ctx.status;
1437 for (i = 0; i < num; i++)
1438 usb_free_urb(ctx.urbs[i]);
1441 usb_free_coherent(udev, size, buf, buf_dma);
1445 /*-------------------------------------------------------------------------*/
1447 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1452 /* shouldn't look or act halted */
1453 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1455 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1460 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1463 retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1469 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1474 /* should look and act halted */
1475 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1477 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1482 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1485 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1486 if (retval != -EPIPE)
1488 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1489 if (retval != -EPIPE)
1494 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1498 /* shouldn't look or act halted now */
1499 retval = verify_not_halted(tdev, ep, urb);
1503 /* set halt (protocol test only), verify it worked */
1504 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1505 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1506 USB_ENDPOINT_HALT, ep,
1507 NULL, 0, USB_CTRL_SET_TIMEOUT);
1509 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1512 retval = verify_halted(tdev, ep, urb);
1516 /* clear halt (tests API + protocol), verify it worked */
1517 retval = usb_clear_halt(urb->dev, urb->pipe);
1519 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1522 retval = verify_not_halted(tdev, ep, urb);
1526 /* NOTE: could also verify SET_INTERFACE clear halts ... */
1531 static int halt_simple(struct usbtest_dev *dev)
1536 struct usb_device *udev = testdev_to_usbdev(dev);
1538 if (udev->speed == USB_SPEED_SUPER)
1539 urb = simple_alloc_urb(udev, 0, 1024);
1541 urb = simple_alloc_urb(udev, 0, 512);
1546 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1547 urb->pipe = dev->in_pipe;
1548 retval = test_halt(dev, ep, urb);
1553 if (dev->out_pipe) {
1554 ep = usb_pipeendpoint(dev->out_pipe);
1555 urb->pipe = dev->out_pipe;
1556 retval = test_halt(dev, ep, urb);
1559 simple_free_urb(urb);
1563 /*-------------------------------------------------------------------------*/
1565 /* Control OUT tests use the vendor control requests from Intel's
1566 * USB 2.0 compliance test device: write a buffer, read it back.
1568 * Intel's spec only _requires_ that it work for one packet, which
1569 * is pretty weak. Some HCDs place limits here; most devices will
1570 * need to be able to handle more than one OUT data packet. We'll
1571 * try whatever we're told to try.
1573 static int ctrl_out(struct usbtest_dev *dev,
1574 unsigned count, unsigned length, unsigned vary, unsigned offset)
1580 struct usb_device *udev;
1582 if (length < 1 || length > 0xffff || vary >= length)
1585 buf = kmalloc(length + offset, GFP_KERNEL);
1590 udev = testdev_to_usbdev(dev);
1594 /* NOTE: hardware might well act differently if we pushed it
1595 * with lots of back-to-back queued requests.
1597 for (i = 0; i < count; i++) {
1598 /* write patterned data */
1599 for (j = 0; j < len; j++)
1601 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1602 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1603 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1604 if (retval != len) {
1607 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1614 /* read it back -- assuming nothing intervened!! */
1615 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1616 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1617 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1618 if (retval != len) {
1621 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1628 /* fail if we can't verify */
1629 for (j = 0; j < len; j++) {
1630 if (buf[j] != (u8) (i + j)) {
1631 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1632 j, buf[j], (u8) (i + j));
1644 /* [real world] the "zero bytes IN" case isn't really used.
1645 * hardware can easily trip up in this weird case, since its
1646 * status stage is IN, not OUT like other ep0in transfers.
1649 len = realworld ? 1 : 0;
1653 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1656 kfree(buf - offset);
1660 /*-------------------------------------------------------------------------*/
1662 /* ISO tests ... mimics common usage
1663 * - buffer length is split into N packets (mostly maxpacket sized)
1664 * - multi-buffers according to sglen
1667 struct iso_context {
1671 struct completion done;
1673 unsigned long errors;
1674 unsigned long packet_count;
1675 struct usbtest_dev *dev;
1678 static void iso_callback(struct urb *urb)
1680 struct iso_context *ctx = urb->context;
1682 spin_lock(&ctx->lock);
1685 ctx->packet_count += urb->number_of_packets;
1686 if (urb->error_count > 0)
1687 ctx->errors += urb->error_count;
1688 else if (urb->status != 0)
1689 ctx->errors += urb->number_of_packets;
1690 else if (urb->actual_length != urb->transfer_buffer_length)
1692 else if (check_guard_bytes(ctx->dev, urb) != 0)
1695 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1696 && !ctx->submit_error) {
1697 int status = usb_submit_urb(urb, GFP_ATOMIC);
1702 dev_err(&ctx->dev->intf->dev,
1703 "iso resubmit err %d\n",
1706 case -ENODEV: /* disconnected */
1707 case -ESHUTDOWN: /* endpoint disabled */
1708 ctx->submit_error = 1;
1714 if (ctx->pending == 0) {
1716 dev_err(&ctx->dev->intf->dev,
1717 "iso test, %lu errors out of %lu\n",
1718 ctx->errors, ctx->packet_count);
1719 complete(&ctx->done);
1722 spin_unlock(&ctx->lock);
1725 static struct urb *iso_alloc_urb(
1726 struct usb_device *udev,
1728 struct usb_endpoint_descriptor *desc,
1734 unsigned i, maxp, packets;
1736 if (bytes < 0 || !desc)
1738 maxp = 0x7ff & usb_endpoint_maxp(desc);
1739 maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1740 packets = DIV_ROUND_UP(bytes, maxp);
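/* usb_endpoint_maxp() returns wMaxPacketSize: bits 10:0 give the base packet
 * size and, for high-speed high-bandwidth endpoints, bits 12:11 encode up to
 * two extra transactions per microframe, so maxp above is the full
 * per-interval payload used to split 'bytes' into packets.
 */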
1742 urb = usb_alloc_urb(packets, GFP_KERNEL);
1748 urb->number_of_packets = packets;
1749 urb->transfer_buffer_length = bytes;
1750 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1752 &urb->transfer_dma);
1753 if (!urb->transfer_buffer) {
1758 memset(urb->transfer_buffer, GUARD_BYTE, offset);
1759 urb->transfer_buffer += offset;
1760 urb->transfer_dma += offset;
1762 /* For inbound transfers use the guard byte so the test fails
1763 if the data is not copied correctly */
1764 memset(urb->transfer_buffer,
1765 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1768 for (i = 0; i < packets; i++) {
1769 /* here, only the last packet will be short */
1770 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1771 bytes -= urb->iso_frame_desc[i].length;
1773 urb->iso_frame_desc[i].offset = maxp * i;
1776 urb->complete = iso_callback;
1777 /* urb->context = SET BY CALLER */
1778 urb->interval = 1 << (desc->bInterval - 1);
1779 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1784 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1785 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1787 struct iso_context context;
1788 struct usb_device *udev;
1790 unsigned long packets = 0;
1792 struct urb *urbs[10]; /* FIXME no limit */
1794 if (param->sglen > 10)
1797 memset(&context, 0, sizeof(context));
1798 context.count = param->iterations * param->sglen;
1800 init_completion(&context.done);
1801 spin_lock_init(&context.lock);
1803 memset(urbs, 0, sizeof(urbs));
1804 udev = testdev_to_usbdev(dev);
1805 dev_info(&dev->intf->dev,
1806 "... iso period %d %sframes, wMaxPacket %04x\n",
1807 1 << (desc->bInterval - 1),
1808 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1809 usb_endpoint_maxp(desc));
1811 for (i = 0; i < param->sglen; i++) {
1812 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1813 param->length, offset);
1818 packets += urbs[i]->number_of_packets;
1819 urbs[i]->context = &context;
1821 packets *= param->iterations;
1822 dev_info(&dev->intf->dev,
1823 "... total %lu msec (%lu packets)\n",
1824 (packets * (1 << (desc->bInterval - 1)))
1825 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1828 spin_lock_irq(&context.lock);
1829 for (i = 0; i < param->sglen; i++) {
1831 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1833 ERROR(dev, "submit iso[%d], error %d\n", i, status);
1835 spin_unlock_irq(&context.lock);
1839 simple_free_urb(urbs[i]);
1842 context.submit_error = 1;
1846 spin_unlock_irq(&context.lock);
1848 wait_for_completion(&context.done);
1850 for (i = 0; i < param->sglen; i++) {
1852 simple_free_urb(urbs[i]);
1855 * Isochronous transfers are expected to fail sometimes. As an
1856 * arbitrary limit, we will report an error if any submissions
1857 * fail or if the transfer failure rate is > 10%.
1861 else if (context.submit_error)
1863 else if (context.errors > context.packet_count / 10)
1868 for (i = 0; i < param->sglen; i++) {
1870 simple_free_urb(urbs[i]);
1875 static int test_unaligned_bulk(
1876 struct usbtest_dev *tdev,
1880 unsigned transfer_flags,
1884 struct urb *urb = usbtest_alloc_urb(
1885 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1890 retval = simple_io(tdev, urb, iterations, 0, 0, label);
1891 simple_free_urb(urb);
1895 /*-------------------------------------------------------------------------*/
1897 /* We only have this one interface to user space, through usbfs.
1898 * User mode code can scan usbfs to find N different devices (maybe on
1899 * different busses) to use when testing, and allocate one thread per
1900 * test. So discovery is simplified, and we have no device naming issues.
1902 * Don't use these only as stress/load tests. Use them along with
1903 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
1904 * video capture, and so on. Run different tests at different times, in
1905 * different sequences. Nothing here should interact with other devices,
1906 * except indirectly by consuming USB bandwidth and CPU resources for test
1907 * threads and request completion. But the only way to know that for sure
1908 * is to test when HC queues are in use by many devices.
1910 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
1911 * it locks out usbcore in certain code paths. Notably, if you disconnect
1912 * the device-under-test, khubd will block forever waiting for the
1913 * ioctl to complete ... so that usb_disconnect() can abort the pending
1914 * urbs and then call usbtest_disconnect(). To abort a test, you're best
1915 * off just killing the userspace task and waiting for it to exit.
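/* Illustrative sketch only (not part of this driver): user space issues a
 * test roughly the way the testusb tool does, wrapping USBTEST_REQUEST in
 * usbfs's USBDEVFS_IOCTL on the device node.  The device path and interface
 * number below are assumptions for the example, and user space has to carry
 * its own copy of struct usbtest_param (see the FIXME near its definition).
 */
#if 0	/* user-space example, never compiled as part of this file */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

static int issue_usbtest(const char *devpath, int ifnum,
		struct usbtest_param *param)
{
	struct usbdevfs_ioctl wrapper;
	int fd, ret;

	fd = open(devpath, O_RDWR);		/* e.g. /dev/bus/usb/001/004 */
	if (fd < 0)
		return -1;

	wrapper.ifno = ifnum;			/* interface bound to usbtest */
	wrapper.ioctl_code = USBTEST_REQUEST;
	wrapper.data = param;			/* test_num, iterations, ... */

	ret = ioctl(fd, USBDEVFS_IOCTL, &wrapper);
	close(fd);
	return ret;				/* negative with errno on failure */
}
#endif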
1919 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1921 struct usbtest_dev *dev = usb_get_intfdata(intf);
1922 struct usb_device *udev = testdev_to_usbdev(dev);
1923 struct usbtest_param *param = buf;
1924 int retval = -EOPNOTSUPP;
1926 struct scatterlist *sg;
1927 struct usb_sg_request req;
1928 struct timeval start;
1931 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1933 pattern = mod_pattern;
1935 if (code != USBTEST_REQUEST)
1938 if (param->iterations <= 0)
1941 if (mutex_lock_interruptible(&dev->lock))
1942 return -ERESTARTSYS;
1944 /* FIXME: What if a system sleep starts while a test is running? */
1946 /* some devices, like ez-usb default devices, need a non-default
1947 * altsetting to have any active endpoints. some tests change
1948 * altsettings; force a default so most tests don't need to check.
1950 if (dev->info->alt >= 0) {
1953 if (intf->altsetting->desc.bInterfaceNumber) {
1954 mutex_unlock(&dev->lock);
1957 res = set_altsetting(dev, dev->info->alt);
1960 "set altsetting to %d failed, %d\n",
1961 dev->info->alt, res);
1962 mutex_unlock(&dev->lock);
1968 * Just a bunch of test cases that every HCD is expected to handle.
1970 * Some may need specific firmware, though it'd be good to have
1971 * one firmware image to handle all the test cases.
1973 * FIXME add more tests! cancel requests, verify the data, control
1974 * queueing, concurrent read+write threads, and so on.
1976 do_gettimeofday(&start);
1977 switch (param->test_num) {
1980 dev_info(&intf->dev, "TEST 0: NOP\n");
1984 /* Simple non-queued bulk I/O tests */
1986 if (dev->out_pipe == 0)
1988 dev_info(&intf->dev,
1989 "TEST 1: write %d bytes %u times\n",
1990 param->length, param->iterations);
1991 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1996 /* FIRMWARE: bulk sink (maybe accepts short writes) */
1997 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1998 simple_free_urb(urb);
2001 if (dev->in_pipe == 0)
2003 dev_info(&intf->dev,
2004 "TEST 2: read %d bytes %u times\n",
2005 param->length, param->iterations);
2006 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2011 /* FIRMWARE: bulk source (maybe generates short writes) */
2012 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2013 simple_free_urb(urb);
2016 if (dev->out_pipe == 0 || param->vary == 0)
2018 dev_info(&intf->dev,
2019 "TEST 3: write/%d 0..%d bytes %u times\n",
2020 param->vary, param->length, param->iterations);
2021 urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
2026 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2027 retval = simple_io(dev, urb, param->iterations, param->vary,
2029 simple_free_urb(urb);
2032 if (dev->in_pipe == 0 || param->vary == 0)
2034 dev_info(&intf->dev,
2035 "TEST 4: read/%d 0..%d bytes %u times\n",
2036 param->vary, param->length, param->iterations);
2037 urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
2042 /* FIRMWARE: bulk source (maybe generates short writes) */
2043 retval = simple_io(dev, urb, param->iterations, param->vary,
2045 simple_free_urb(urb);
2048 /* Queued bulk I/O tests */
2050 if (dev->out_pipe == 0 || param->sglen == 0)
2052 dev_info(&intf->dev,
2053 "TEST 5: write %d sglists %d entries of %d bytes\n",
2055 param->sglen, param->length);
2056 sg = alloc_sglist(param->sglen, param->length, 0);
2061 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2062 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2063 &req, sg, param->sglen);
2064 free_sglist(sg, param->sglen);
2068 if (dev->in_pipe == 0 || param->sglen == 0)
2070 dev_info(&intf->dev,
2071 "TEST 6: read %d sglists %d entries of %d bytes\n",
2073 param->sglen, param->length);
2074 sg = alloc_sglist(param->sglen, param->length, 0);
2079 /* FIRMWARE: bulk source (maybe generates short writes) */
2080 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2081 &req, sg, param->sglen);
2082 free_sglist(sg, param->sglen);
2085 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2087 dev_info(&intf->dev,
2088 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
2089 param->vary, param->iterations,
2090 param->sglen, param->length);
2091 sg = alloc_sglist(param->sglen, param->length, param->vary);
2096 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2097 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2098 &req, sg, param->sglen);
2099 free_sglist(sg, param->sglen);
2102 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2104 dev_info(&intf->dev,
2105 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
2106 param->vary, param->iterations,
2107 param->sglen, param->length);
2108 sg = alloc_sglist(param->sglen, param->length, param->vary);
2113 /* FIRMWARE: bulk source (maybe generates short writes) */
2114 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2115 &req, sg, param->sglen);
2116 free_sglist(sg, param->sglen);
2119 /* non-queued sanity tests for control (chapter 9 subset) */
2122 dev_info(&intf->dev,
2123 "TEST 9: ch9 (subset) control tests, %d times\n",
2125 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2126 retval = ch9_postconfig(dev);
2128 dev_err(&intf->dev, "ch9 subset failed, "
2129 "iterations left %d\n", i);
2132 /* queued control messaging */
2135 dev_info(&intf->dev,
2136 "TEST 10: queue %d control calls, %d times\n",
2139 retval = test_ctrl_queue(dev, param);
2142 /* simple non-queued unlinks (ring with one urb) */
2144 if (dev->in_pipe == 0 || !param->length)
2147 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
2148 param->iterations, param->length);
2149 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2150 retval = unlink_simple(dev, dev->in_pipe,
2153 dev_err(&intf->dev, "unlink reads failed %d, "
2154 "iterations left %d\n", retval, i);
2157 if (dev->out_pipe == 0 || !param->length)
2160 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
2161 param->iterations, param->length);
2162 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2163 retval = unlink_simple(dev, dev->out_pipe,
2166 dev_err(&intf->dev, "unlink writes failed %d, "
2167 "iterations left %d\n", retval, i);
2172 if (dev->out_pipe == 0 && dev->in_pipe == 0)
2175 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
2177 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2178 retval = halt_simple(dev);
2181 ERROR(dev, "halts failed, iterations left %d\n", i);
2184 /* control write tests */
2186 if (!dev->info->ctrl_out)
2188 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
2190 realworld ? 1 : 0, param->length,
2192 retval = ctrl_out(dev, param->iterations,
2193 param->length, param->vary, 0);
2196 /* iso write tests */
2198 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2200 dev_info(&intf->dev,
2201 "TEST 15: write %d iso, %d entries of %d bytes\n",
2203 param->sglen, param->length);
2204 /* FIRMWARE: iso sink */
2205 retval = test_iso_queue(dev, param,
2206 dev->out_iso_pipe, dev->iso_out, 0);
2209 /* iso read tests */
2211 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2213 dev_info(&intf->dev,
2214 "TEST 16: read %d iso, %d entries of %d bytes\n",
2216 param->sglen, param->length);
2217 /* FIRMWARE: iso source */
2218 retval = test_iso_queue(dev, param,
2219 dev->in_iso_pipe, dev->iso_in, 0);
2222 /* FIXME scatterlist cancel (needs helper thread) */
2224 /* Tests for bulk I/O using DMA mapping by core and odd address */
2226 if (dev->out_pipe == 0)
2228 dev_info(&intf->dev,
2229 "TEST 17: write odd addr %d bytes %u times core map\n",
2230 param->length, param->iterations);
2232 retval = test_unaligned_bulk(
2234 param->length, param->iterations,
2239 if (dev->in_pipe == 0)
2241 dev_info(&intf->dev,
2242 "TEST 18: read odd addr %d bytes %u times core map\n",
2243 param->length, param->iterations);
2245 retval = test_unaligned_bulk(
2247 param->length, param->iterations,
2251 /* Tests for bulk I/O using premapped coherent buffer and odd address */
2253 if (dev->out_pipe == 0)
2255 dev_info(&intf->dev,
2256 "TEST 19: write odd addr %d bytes %u times premapped\n",
2257 param->length, param->iterations);
2259 retval = test_unaligned_bulk(
2261 param->length, param->iterations,
2262 URB_NO_TRANSFER_DMA_MAP, "test19");
2266 if (dev->in_pipe == 0)
2268 dev_info(&intf->dev,
2269 "TEST 20: read odd addr %d bytes %u times premapped\n",
2270 param->length, param->iterations);
2272 retval = test_unaligned_bulk(
2274 param->length, param->iterations,
2275 URB_NO_TRANSFER_DMA_MAP, "test20");
2278 /* control write tests with unaligned buffer */
2280 if (!dev->info->ctrl_out)
2282 dev_info(&intf->dev,
2283 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
2285 realworld ? 1 : 0, param->length,
2287 retval = ctrl_out(dev, param->iterations,
2288 param->length, param->vary, 1);
2291 /* unaligned iso tests */
2293 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2295 dev_info(&intf->dev,
2296 "TEST 22: write %d iso odd, %d entries of %d bytes\n",
2298 param->sglen, param->length);
2299 retval = test_iso_queue(dev, param,
2300 dev->out_iso_pipe, dev->iso_out, 1);
2304 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2306 dev_info(&intf->dev,
2307 "TEST 23: read %d iso odd, %d entries of %d bytes\n",
2309 param->sglen, param->length);
2310 retval = test_iso_queue(dev, param,
2311 dev->in_iso_pipe, dev->iso_in, 1);
2314 /* unlink URBs from a bulk-OUT queue */
2316 if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2319 dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
2320 "%d %d-byte writes\n",
2321 param->iterations, param->sglen, param->length);
2322 for (i = param->iterations; retval == 0 && i > 0; --i) {
2323 retval = unlink_queued(dev, dev->out_pipe,
2324 param->sglen, param->length);
2327 "unlink queued writes failed %d, "
2328 "iterations left %d\n", retval, i);
2335 do_gettimeofday(¶m->duration);
2336 param->duration.tv_sec -= start.tv_sec;
2337 param->duration.tv_usec -= start.tv_usec;
2338 if (param->duration.tv_usec < 0) {
2339 param->duration.tv_usec += 1000 * 1000;
2340 param->duration.tv_sec -= 1;
2342 mutex_unlock(&dev->lock);
2346 /*-------------------------------------------------------------------------*/
2348 static unsigned force_interrupt;
2349 module_param(force_interrupt, uint, 0);
2350 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2353 static unsigned short vendor;
2354 module_param(vendor, ushort, 0);
2355 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2357 static unsigned short product;
2358 module_param(product, ushort, 0);
2359 MODULE_PARM_DESC(product, "product code (from vendor)");
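/* e.g. "modprobe usbtest vendor=0x1234 product=0x5678" (placeholder IDs)
 * lets the catch-all GENERIC entry at the end of id_table claim an arbitrary
 * device for control-only tests; the vendor match is required, the product
 * match is optional.
 */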
2363 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2365 struct usb_device *udev;
2366 struct usbtest_dev *dev;
2367 struct usbtest_info *info;
2368 char *rtest, *wtest;
2369 char *irtest, *iwtest;
2371 udev = interface_to_usbdev(intf);
2374 /* specify devices by module parameters? */
2375 if (id->match_flags == 0) {
2376 /* vendor match required, product match optional */
2377 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2379 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2381 dev_info(&intf->dev, "matched module params, "
2382 "vend=0x%04x prod=0x%04x\n",
2383 le16_to_cpu(udev->descriptor.idVendor),
2384 le16_to_cpu(udev->descriptor.idProduct));
2388 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2391 info = (struct usbtest_info *) id->driver_info;
2393 mutex_init(&dev->lock);
2397 /* cacheline-aligned scratch for i/o */
2398 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2399 if (dev->buf == NULL) {
2404 /* NOTE this doesn't yet test the handful of differences that are
2405 * visible with high speed interrupts: bigger maxpacket (1K) and
2406 * "high bandwidth" modes (up to 3 packets/uframe).
2409 irtest = iwtest = "";
2410 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2412 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2416 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2417 wtest = " intr-out";
2420 if (override_alt >= 0 || info->autoconf) {
2423 status = get_endpoints(dev, intf);
2425 WARNING(dev, "couldn't get endpoints, %d\n",
2431 /* may find bulk or ISO pipes */
2434 dev->in_pipe = usb_rcvbulkpipe(udev,
2437 dev->out_pipe = usb_sndbulkpipe(udev,
2443 wtest = " bulk-out";
2444 if (dev->in_iso_pipe)
2446 if (dev->out_iso_pipe)
2447 iwtest = " iso-out";
2450 usb_set_intfdata(intf, dev);
2451 dev_info(&intf->dev, "%s\n", info->name);
2452 dev_info(&intf->dev, "%s {control%s%s%s%s%s} tests%s\n",
2453 usb_speed_string(udev->speed),
2454 info->ctrl_out ? " in/out" : "",
2457 info->alt >= 0 ? " (+alt)" : "");
2461 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2466 static int usbtest_resume(struct usb_interface *intf)
2472 static void usbtest_disconnect(struct usb_interface *intf)
2474 struct usbtest_dev *dev = usb_get_intfdata(intf);
2476 usb_set_intfdata(intf, NULL);
2477 dev_dbg(&intf->dev, "disconnect\n");
2481 /* Basic testing only needs a device that can source or sink bulk traffic.
2482 * Any device can test control transfers (default with GENERIC binding).
2484 * Several entries work with the default EP0 implementation that's built
2485 * into EZ-USB chips. There's a default vendor ID which can be overridden
2486 * by (very) small config EEPROMS, but otherwise all these devices act
2487 * identically until firmware is loaded: only EP0 works. It turns out
2488 * to be easy to make other endpoints work, without modifying that EP0
2489 * behavior. For now, we expect that kind of firmware.
2492 /* an21xx or fx versions of ez-usb */
2493 static struct usbtest_info ez1_info = {
2494 .name = "EZ-USB device",
2500 /* fx2 version of ez-usb */
2501 static struct usbtest_info ez2_info = {
2502 .name = "FX2 device",
2508 /* ezusb family device with dedicated usb test firmware,
2510 static struct usbtest_info fw_info = {
2511 .name = "usb test device",
2515 .autoconf = 1, /* iso and ctrl_out need autoconf */
2517 .iso = 1, /* iso_ep's are #8 in/out */
2520 /* peripheral running Linux and 'zero.c' test firmware, or
2521 * its user-mode cousin. different versions of this use
2522 * different hardware with the same vendor/product codes.
2523 * host side MUST rely on the endpoint descriptors.
2525 static struct usbtest_info gz_info = {
2526 .name = "Linux gadget zero",
2533 static struct usbtest_info um_info = {
2534 .name = "Linux user mode test driver",
2539 static struct usbtest_info um2_info = {
2540 .name = "Linux user mode ISO test driver",
2547 /* this is a nice source of high speed bulk data;
2548 * uses an FX2, with firmware provided in the device
2550 static struct usbtest_info ibot2_info = {
2551 .name = "iBOT2 webcam",
2558 /* we can use any device to test control traffic */
2559 static struct usbtest_info generic_info = {
2560 .name = "Generic USB device",
2566 static const struct usb_device_id id_table[] = {
2568 /*-------------------------------------------------------------*/
2570 /* EZ-USB devices which download firmware to replace (or in our
2571 * case augment) the default device implementation.
2574 /* generic EZ-USB FX controller */
2575 { USB_DEVICE(0x0547, 0x2235),
2576 .driver_info = (unsigned long) &ez1_info,
2579 /* CY3671 development board with EZ-USB FX */
2580 { USB_DEVICE(0x0547, 0x0080),
2581 .driver_info = (unsigned long) &ez1_info,
2584 /* generic EZ-USB FX2 controller (or development board) */
2585 { USB_DEVICE(0x04b4, 0x8613),
2586 .driver_info = (unsigned long) &ez2_info,
2589 /* re-enumerated usb test device firmware */
2590 { USB_DEVICE(0xfff0, 0xfff0),
2591 .driver_info = (unsigned long) &fw_info,
2594 /* "Gadget Zero" firmware runs under Linux */
2595 { USB_DEVICE(0x0525, 0xa4a0),
2596 .driver_info = (unsigned long) &gz_info,
2599 /* so does a user-mode variant */
2600 { USB_DEVICE(0x0525, 0xa4a4),
2601 .driver_info = (unsigned long) &um_info,
2604 /* ... and a user-mode variant that talks iso */
2605 { USB_DEVICE(0x0525, 0xa4a3),
2606 .driver_info = (unsigned long) &um2_info,
2610 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2611 /* this does not coexist with the real Keyspan 19qi driver! */
2612 { USB_DEVICE(0x06cd, 0x010b),
2613 .driver_info = (unsigned long) &ez1_info,
2617 /*-------------------------------------------------------------*/
2620 /* iBOT2 makes a nice source of high speed bulk-in data */
2621 /* this does not coexist with a real iBOT2 driver! */
2622 { USB_DEVICE(0x0b62, 0x0059),
2623 .driver_info = (unsigned long) &ibot2_info,
2627 /*-------------------------------------------------------------*/
2630 /* module params can specify devices to use for control tests */
2631 { .driver_info = (unsigned long) &generic_info, },
2634 /*-------------------------------------------------------------*/
2638 MODULE_DEVICE_TABLE(usb, id_table);
2640 static struct usb_driver usbtest_driver = {
2642 .id_table = id_table,
2643 .probe = usbtest_probe,
2644 .unlocked_ioctl = usbtest_ioctl,
2645 .disconnect = usbtest_disconnect,
2646 .suspend = usbtest_suspend,
2647 .resume = usbtest_resume,
2650 /*-------------------------------------------------------------------------*/
2652 static int __init usbtest_init(void)
2656 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2658 return usb_register(&usbtest_driver);
2660 module_init(usbtest_init);
2662 static void __exit usbtest_exit(void)
2664 usb_deregister(&usbtest_driver);
2666 module_exit(usbtest_exit);
2668 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2669 MODULE_LICENSE("GPL");