Commit 7de8b926 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (34 commits)
  USB: xhci: Stall handling bug fixes.
  USB: xhci: Support for 64-byte contexts
  USB: xhci: Always align output device contexts to 64 bytes.
  USB: xhci: Scratchpad buffer allocation
  USB: Fix parsing of SuperSpeed Endpoint Companion descriptor.
  USB: xhci: Fail gracefully if there's no SS ep companion descriptor.
  USB: xhci: Handle babble errors on transfers.
  USB: xhci: Setup HW retries correctly.
  USB: xhci: Check if the host controller died in IRQ handler.
  USB: xhci: Don't oops if the host doesn't halt.
  USB: xhci: Make debugging more verbose.
  USB: xhci: Correct Event Handler Busy flag usage.
  USB: xhci: Handle short control packets correctly.
  USB: xhci: Represent 64-bit addresses with one u64.
  USB: xhci: Use GFP_ATOMIC while holding spinlocks.
  USB: xhci: Deal with stalled endpoints.
  USB: xhci: Set TD size in transfer TRB.
  USB: xhci: fix less- and greater than confusion
  USB: usbtest: no need for USB_DEVICEFS
  USB: musb: fix CONFIGDATA register read issue
  ...
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	int max_tx;
 	int i;
-	/* Allocate space for the SS endpoint companion descriptor */
-	ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-			GFP_KERNEL);
-	if (!ep->ss_ep_comp)
-		return -ENOMEM;
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
 		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
				" interface %d altsetting %d ep %d: "
				"using minimum values\n",
				cfgno, inum, asnum, ep->desc.bEndpointAddress);
-		ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-		ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-		ep->ss_ep_comp->desc.bMaxBurst = 0;
-		/*
-		 * Leave bmAttributes as zero, which will mean no streams for
-		 * bulk, and isoc won't support multiple bursts of packets.
-		 * With bursts of only one packet, and a Mult of 1, the max
-		 * amount of data moved per endpoint service interval is one
-		 * packet.
-		 */
-		if (usb_endpoint_xfer_isoc(&ep->desc) ||
-				usb_endpoint_xfer_int(&ep->desc))
-			ep->ss_ep_comp->desc.wBytesPerInterval =
-					ep->desc.wMaxPacketSize;
 		/*
 		 * The next descriptor is for an Endpoint or Interface,
 		 * no extra descriptors to copy into the companion structure,
 		 * and we didn't eat up any of the buffer.
 		 */
-		retval = 0;
-		goto valid;
+		return 0;
 	}
 	memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
 	desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 	buffer += i;
 	size -= i;
+	/* Allocate space for the SS endpoint companion descriptor */
+	endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+			GFP_KERNEL);
+	if (!endpoint->ss_ep_comp)
+		return -ENOMEM;
+	/* Fill in some default values (may be overwritten later) */
+	endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+	endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+	endpoint->ss_ep_comp->desc.bMaxBurst = 0;
+	/*
+	 * Leave bmAttributes as zero, which will mean no streams for
+	 * bulk, and isoc won't support multiple bursts of packets.
+	 * With bursts of only one packet, and a Mult of 1, the max
+	 * amount of data moved per endpoint service interval is one
+	 * packet.
+	 */
+	if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
+			usb_endpoint_xfer_int(&endpoint->desc))
+		endpoint->ss_ep_comp->desc.wBytesPerInterval =
+				endpoint->desc.wMaxPacketSize;
 	if (size > 0) {
 		retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
				inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 			retval = buffer - buffer0;
 		}
 	} else {
+		dev_warn(ddev, "config %d interface %d altsetting %d "
+				"endpoint 0x%X has no "
+				"SuperSpeed companion descriptor\n",
+				cfgno, inum, asnum, d->bEndpointAddress);
 		retval = buffer - buffer0;
 	}
 } else {
...
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	int retval;
+	ehci_reset(ehci);
 	retval = ehci_halt(ehci);
 	if (retval)
 		return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 	hcd->has_tt = 1;
-	ehci_reset(ehci);
 	ehci_port_power(ehci, 0);
 	return retval;
...
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
 	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+	ohci_stop(hcd);
 	omap_ohci_clock_power(0);
 }
...
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
 	void *addr;
 	u32 temp;
+	u64 temp_64;
 	addr = &ir_set->irq_pending;
 	temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 		xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
				addr, (unsigned int)temp);
-	addr = &ir_set->erst_base[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-	addr = &ir_set->erst_base[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
-			addr, (unsigned int) temp);
-	addr = &ir_set->erst_dequeue[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-	addr = &ir_set->erst_dequeue[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_base;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
+			addr, temp_64);
+	addr = &ir_set->erst_dequeue;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
+			addr, temp_64);
 }

 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
-		address = trb->link.segment_ptr[0] +
-			(((u64) trb->link.segment_ptr[1]) << 32);
+		address = trb->link.segment_ptr;
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer[0] +
-			(((u64) trb->trans_event.buffer[1]) << 32);
+		address = trb->trans_event.buffer;
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb[0] +
-			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		address = trb->event_cmd.cmd_trb;
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				(unsigned int) trb->link.segment_ptr[0],
-				(unsigned int) trb->link.segment_ptr[1],
+				lower_32_bits(trb->link.segment_ptr),
+				upper_32_bits(trb->link.segment_ptr),
				(unsigned int) trb->link.intr_target,
				(unsigned int) trb->link.control);
 		addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 		entry = &erst->entries[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
				(unsigned int) addr,
-				(unsigned int) entry->seg_addr[0],
-				(unsigned int) entry->seg_addr[1],
+				lower_32_bits(entry->seg_addr),
+				upper_32_bits(entry->seg_addr),
				(unsigned int) entry->seg_size,
				(unsigned int) entry->rsvd);
 		addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-	u32 val;
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+	u64 val;
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+			lower_32_bits(val));
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+			upper_32_bits(val));
 }

-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+	int i;
+	for (i = 0; i < 4; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx "
+			 "(dma) %#08llx - rsvd64[%d]\n",
+			 &ctx[4 + i], (unsigned long long)dma,
+			 ctx[4 + i], i);
+		dma += 8;
+	}
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
-	int i, j;
-	int last_ep_ctx = 31;
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
+	int i;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-			&ctx->drop_flags, (unsigned long long)dma,
-			ctx->drop_flags);
-	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-			&ctx->add_flags, (unsigned long long)dma,
-			ctx->add_flags);
-	dma += field_size;
-	for (i = 0; i > 6; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->rsvd[i], (unsigned long long)dma,
-				ctx->rsvd[i], i);
-		dma += field_size;
-	}
+	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

 	xhci_dbg(xhci, "Slot Context:\n");
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-			&ctx->slot.dev_info,
-			(unsigned long long)dma, ctx->slot.dev_info);
+			&slot_ctx->dev_info,
+			(unsigned long long)dma, slot_ctx->dev_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-			&ctx->slot.dev_info2,
-			(unsigned long long)dma, ctx->slot.dev_info2);
+			&slot_ctx->dev_info2,
+			(unsigned long long)dma, slot_ctx->dev_info2);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-			&ctx->slot.tt_info,
-			(unsigned long long)dma, ctx->slot.tt_info);
+			&slot_ctx->tt_info,
+			(unsigned long long)dma, slot_ctx->tt_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-			&ctx->slot.dev_state,
-			(unsigned long long)dma, ctx->slot.dev_state);
+			&slot_ctx->dev_state,
+			(unsigned long long)dma, slot_ctx->dev_state);
 	dma += field_size;
-	for (i = 0; i > 4; ++i) {
+	for (i = 0; i < 4; ++i) {
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->slot.reserved[i], (unsigned long long)dma,
-				ctx->slot.reserved[i], i);
+				&slot_ctx->reserved[i], (unsigned long long)dma,
+				slot_ctx->reserved[i], i);
 		dma += field_size;
 	}
+	if (csz)
+		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i, j;
+	int last_ep_ctx = 31;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

 	if (last_ep < 31)
 		last_ep_ctx = last_ep + 1;
 	for (i = 0; i < last_ep_ctx; ++i) {
+		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+		dma_addr_t dma = ctx->dma +
+			((unsigned long)ep_ctx - (unsigned long)ctx);
 		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-				&ctx->ep[i].ep_info,
-				(unsigned long long)dma, ctx->ep[i].ep_info);
+				&ep_ctx->ep_info,
+				(unsigned long long)dma, ep_ctx->ep_info);
 		dma += field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-				&ctx->ep[i].ep_info2,
-				(unsigned long long)dma, ctx->ep[i].ep_info2);
+				&ep_ctx->ep_info2,
+				(unsigned long long)dma, ep_ctx->ep_info2);
 		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-				&ctx->ep[i].deq[0],
-				(unsigned long long)dma, ctx->ep[i].deq[0]);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-				&ctx->ep[i].deq[1],
-				(unsigned long long)dma, ctx->ep[i].deq[1]);
-		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+				&ep_ctx->deq,
+				(unsigned long long)dma, ep_ctx->deq);
+		dma += 2*field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-				&ctx->ep[i].tx_info,
-				(unsigned long long)dma, ctx->ep[i].tx_info);
+				&ep_ctx->tx_info,
+				(unsigned long long)dma, ep_ctx->tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
 			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-					&ctx->ep[i].reserved[j],
+					&ep_ctx->reserved[j],
					(unsigned long long)dma,
-					ctx->ep[i].reserved[j], j);
+					ep_ctx->reserved[j], j);
 			dma += field_size;
 		}
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+	}
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	struct xhci_slot_ctx *slot_ctx;
+	dma_addr_t dma = ctx->dma;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+		struct xhci_input_control_ctx *ctrl_ctx =
+			xhci_get_input_control_ctx(xhci, ctx);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
+			 ctrl_ctx->drop_flags);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+			 &ctrl_ctx->add_flags, (unsigned long long)dma,
+			 ctrl_ctx->add_flags);
+		dma += field_size;
+		for (i = 0; i < 6; ++i) {
+			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+				 ctrl_ctx->rsvd2[i], i);
+			dma += field_size;
+		}
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
+	}
+	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	xhci_dbg_slot_ctx(xhci, ctx);
+	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
(This file's diff has been collapsed.)
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	return 0;
 }
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return NULL;
BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
ctx->type = type;
ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
memset(ctx->bytes, 0, ctx->size);
return ctx;
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (ctx->type == XHCI_CTX_TYPE_DEVICE)
return (struct xhci_slot_ctx *)ctx->bytes;
return (struct xhci_slot_ctx *)
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int ep_index)
{
/* increment ep index by offset of start of ep ctx array */
ep_index++;
if (ctx->type == XHCI_CTX_TYPE_INPUT)
ep_index++;
return (struct xhci_ep_ctx *)
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
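(Not part of the kernel diff above: a standalone, user-space sketch of the container-context offset math that CTX_SIZE() and xhci_get_slot_ctx()/xhci_get_ep_ctx() implement. The helper names and values below are illustrative assumptions only.)

#include <stdio.h>

/* Mirrors CTX_SIZE(): 32-byte contexts normally, 64-byte when
 * HCC_64BYTE_CONTEXT is set in hcc_params. */
static unsigned int ctx_size(int csz_64byte)
{
	return csz_64byte ? 64 : 32;
}

/* Offset of endpoint context ep_index inside an *input* container context:
 * one slot for the input control context, one for the slot context, then
 * the endpoint contexts, exactly as xhci_get_ep_ctx() computes it. */
static unsigned int input_ep_ctx_offset(int csz_64byte, unsigned int ep_index)
{
	return (ep_index + 2) * ctx_size(csz_64byte);
}

int main(void)
{
	printf("EP0 ctx offset, 32-byte contexts: %u\n", input_ep_ctx_offset(0, 0));
	printf("EP0 ctx offset, 64-byte contexts: %u\n", input_ep_ctx_offset(1, 0));
	return 0;
}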
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		xhci_ring_free(xhci, dev->ep_rings[i]);
 	if (dev->in_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->in_ctx, dev->in_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->out_ctx, dev->out_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->out_ctx);
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = 0;
 }
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
 {
-	dma_addr_t dma;
 	struct xhci_virt_device *dev;
 	/* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	dev = xhci->devs[slot_id];
-	/* Allocate the (output) device context that will be used in the HC */
-	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	/* Allocate the (output) device context that will be used in the HC. */
+	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
 		goto fail;
-	dev->out_ctx_dma = dma;
 	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+			(unsigned long long)dev->out_ctx->dma);
 	/* Allocate the (input) device context for address device command */
-	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
 	if (!dev->in_ctx)
 		goto fail;
-	dev->in_ctx_dma = dma;
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+			(unsigned long long)dev->in_ctx->dma);
 	/* Allocate endpoint 0 ring */
 	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	init_completion(&dev->cmd_completion);
-	/*
-	 * Point to output device context in dcbaa; skip the output control
-	 * context, which is eight 32 bit fields (or 32 bytes long)
-	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
-		(u32) dev->out_ctx_dma + (32);
+	/* Point to output device context in dcbaa. */
+	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
-			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
+			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
 	return 1;
 fail:
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx	*ep0_ctx;
 	struct usb_device	*top_dev;
+	struct xhci_slot_ctx    *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
 	dev = xhci->devs[udev->slot_id];
 	/* Slot ID 0 is reserved */
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
				udev->slot_id);
 		return -EINVAL;
 	}
-	ep0_ctx = &dev->in_ctx->ep[0];
+	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 	/* 2) New slot context and endpoint 0 context are valid*/
-	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= LAST_CTX(1);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		dev->in_ctx->slot.dev_info |= (u32) udev->route;
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= (u32) udev->route;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
 		break;
 	case USB_SPEED_HIGH:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
 		break;
 	case USB_SPEED_FULL:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
 		break;
 	case USB_SPEED_LOW:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
 	case USB_SPEED_VARIABLE:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 	/* Is this a LS/FS device under a HS hub? */
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	 */
 	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
-		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
-		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+		slot_ctx->tt_info = udev->tt->hub->slot_id;
+		slot_ctx->tt_info |= udev->ttport << 8;
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	ep0_ctx->ep_info2 |= MAX_BURST(0);
 	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
-	ep0_ctx->deq[0] =
-		dev->ep_rings[0]->first_seg->dma;
-	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-	ep0_ctx->deq[1] = 0;
+	ep0_ctx->deq =
+		dev->ep_rings[0]->first_seg->dma;
+	ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	unsigned int max_burst;
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 	/* Set up the endpoint ring */
 	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
 	if (!virt_dev->new_ep_rings[ep_index])
 		return -ENOMEM;
 	ep_ring = virt_dev->new_ep_rings[ep_index];
-	ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
 	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
 	/* FIXME dig Mult and streams info out of ep companion desc */
-	/* Allow 3 retries for everything but isoc */
+	/* Allow 3 retries for everything but isoc;
+	 * error count = 0 means infinite retries.
+	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
 		ep_ctx->ep_info2 = ERROR_COUNT(3);
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(0);
+		ep_ctx->ep_info2 = ERROR_COUNT(1);
 	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		max_packet = ep->desc.wMaxPacketSize;
 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
 		/* dig out max burst from ep companion desc */
-		max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+		if (!ep->ss_ep_comp) {
+			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+			max_packet = 0;
+		} else {
+			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+		}
 		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
 		break;
 	case USB_SPEED_HIGH:
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 	ep_ctx->ep_info = 0;
 	ep_ctx->ep_info2 = 0;
-	ep_ctx->deq[0] = 0;
-	ep_ctx->deq[1] = 0;
+	ep_ctx->deq = 0;
 	ep_ctx->tx_info = 0;
 	/* Don't free the endpoint ring until the set interface or configuration
 	 * request succeeds.
 	 */
 }
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
if (!num_sp)
return 0;
xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
if (!xhci->scratchpad)
goto fail_sp;
xhci->scratchpad->sp_array =
pci_alloc_consistent(to_pci_dev(dev),
num_sp * sizeof(u64),
&xhci->scratchpad->sp_dma);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
xhci->scratchpad->sp_dma_buffers =
kzalloc(sizeof(dma_addr_t) * num_sp, flags);
if (!xhci->scratchpad->sp_dma_buffers)
goto fail_sp4;
xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
void *buf = pci_alloc_consistent(to_pci_dev(dev),
xhci->page_size, &dma);
if (!buf)
goto fail_sp5;
xhci->scratchpad->sp_array[i] = dma;
xhci->scratchpad->sp_buffers[i] = buf;
xhci->scratchpad->sp_dma_buffers[i] = dma;
}
return 0;
fail_sp5:
for (i = i - 1; i >= 0; i--) {
pci_free_consistent(to_pci_dev(dev), xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
fail_sp4:
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
fail_sp2:
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
fail_sp:
return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
if (!xhci->scratchpad)
return;
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
pci_free_consistent(pdev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
pci_free_consistent(pdev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
}
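(Not part of the kernel diff above: a standalone sketch of the scratchpad bookkeeping that scratchpad_alloc() sets up. HCS_MAX_SCRATCHPAD() pulls the buffer count out of bits 31:27 of HCSPARAMS2; one page-sized buffer is allocated per slot, each DMA address is stored in sp_array, and DCBAA entry 0 points at sp_array. The register value below is made up for illustration.)

#include <stdio.h>

#define HCS_MAX_SCRATCHPAD(p)	(((p) >> 27) & 0x1f)

int main(void)
{
	unsigned int hcs_params2 = 0x20000000;	/* hypothetical HCSPARAMS2 value */

	/* How many scratchpad buffers the driver must hand to the controller */
	printf("scratchpad buffers required: %u\n", HCS_MAX_SCRATCHPAD(hcs_params2));
	return 0;
}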
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->device_pool = NULL;
 	xhci_dbg(xhci, "Freed device context pool\n");
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
+	scratchpad_free(xhci);
 }
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
+	u64		val_64;
 	struct xhci_segment	*seg;
 	u32 page_size;
 	int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dma = dma;
 	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 	/*
 	 * Initialize the ring segment pool.  The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
 	/* See Table 46 and Note on Figure 55 */
-	/* FIXME support 64-byte contexts */
 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-			sizeof(struct xhci_device_control),
-			64, xhci->page_size);
+			2112, 64, xhci->page_size);
 	if (!xhci->segment_pool || !xhci->device_pool)
 		goto fail;
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 	/* Set the address in the Command Ring Control register */
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	val = (val & ~CMD_RING_ADDR_MASK) |
-		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr[0] = seg->dma;
-		entry->seg_addr[1] = 0;
+		entry->seg_addr = seg->dma;
 		entry->seg_size = TRBS_PER_SEGMENT;
 		entry->rsvd = 0;
 		seg = seg->next;
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set the segment table base address */
 	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
-	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < MAX_HC_SLOTS; ++i)
 		xhci->devs[i] = 0;
+	if (scratchpad_alloc(xhci, flags))
+		goto fail;
 	return 0;
 fail:
 	xhci_warn(xhci, "Couldn't initialize memory\n");
 	xhci_mem_cleanup(xhci);
...
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.free_dev =		xhci_free_dev,
 	.add_endpoint =		xhci_add_endpoint,
 	.drop_endpoint =	xhci_drop_endpoint,
+	.endpoint_reset =	xhci_endpoint_reset,
 	.check_bandwidth =	xhci_check_bandwidth,
 	.reset_bandwidth =	xhci_reset_bandwidth,
 	.address_device =	xhci_address_device,
...
(This file's diff has been collapsed.)
@@ -25,6 +25,7 @@
 #include <linux/usb.h>
 #include <linux/timer.h>
+#include <linux/kernel.h>
 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
  * xHCI register interface.
  * This corresponds to the eXtensible Host Controller Interface (xHCI)
  * Revision 0.95 specification
- *
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers.  Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
  */
 /**
@@ -96,6 +89,7 @@ struct xhci_cap_regs {
 #define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
 /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)	(((p) >> 27) & 0x1f)
 /* HCSPARAMS3 - hcs_params3 - bitmasks */
 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -166,10 +160,10 @@ struct xhci_op_regs {
 	u32	reserved1;
 	u32	reserved2;
 	u32	dev_notification;
-	u32	cmd_ring[2];
+	u64	cmd_ring;
 	/* rsvd: offset 0x20-2F */
 	u32	reserved3[4];
-	u32	dcbaa_ptr[2];
+	u64	dcbaa_ptr;
 	u32	config_reg;
 	/* rsvd: offset 0x3C-3FF */
 	u32	reserved4[241];
@@ -254,7 +248,7 @@ struct xhci_op_regs {
 #define CMD_RING_RUNNING	(1 << 3)
 /* bits 4:5 reserved and should be preserved */
 /* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_ADDR_MASK	(0xffffffc0)
+#define CMD_RING_RSVD_BITS	(0x3f)
 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +376,8 @@ struct xhci_intr_reg {
 	u32	irq_control;
 	u32	erst_size;
 	u32	rsvd;
-	u32	erst_base[2];
-	u32	erst_dequeue[2];
+	u64	erst_base;
+	u64	erst_dequeue;
 };
 /* irq_pending bitmasks */
@@ -452,6 +446,27 @@ struct xhci_doorbell_array {
 #define EPI_TO_DB(p)		(((p) + 1) & 0xff)
/**
* struct xhci_container_ctx
* @type: Type of context. Used to calculated offsets to contained contexts.
* @size: Size of the context data
* @bytes: The raw context data given to HW
* @dma: dma address of the bytes
*
* Represents either a Device or Input context. Holds a pointer to the raw
* memory used for the context (bytes) and dma address of it (dma).
*/
struct xhci_container_ctx {
unsigned type;
#define XHCI_CTX_TYPE_DEVICE 0x1
#define XHCI_CTX_TYPE_INPUT 0x2
int size;
u8 *bytes;
dma_addr_t dma;
};
 /**
  * struct xhci_slot_ctx
  * @dev_info:	Route string, device speed, hub info, and last valid endpoint
@@ -538,7 +553,7 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
 	u32	ep_info;
 	u32	ep_info2;
-	u32	deq[2];
+	u64	deq;
 	u32	tx_info;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[3];
@@ -589,18 +604,16 @@ struct xhci_ep_ctx {
 /**
- * struct xhci_device_control
- * Input/Output context; see section 6.2.5.
+ * struct xhci_input_control_context
+ * Input control context; see section 6.2.5.
  *
  * @drop_context:	set the bit of the endpoint context you want to disable
  * @add_context:	set the bit of the endpoint context you want to enable
  */
-struct xhci_device_control {
+struct xhci_input_control_ctx {
 	u32	drop_flags;
 	u32	add_flags;
-	u32	rsvd[6];
-	struct xhci_slot_ctx	slot;
-	struct xhci_ep_ctx	ep[31];
+	u32	rsvd2[6];
 };
 /* drop context bitmasks */
@@ -608,7 +621,6 @@ struct xhci_device_control {
 /* add context bitmasks */
 #define ADD_EP(x)	(0x1 << x)
 struct xhci_virt_device {
 	/*
 	 * Commands to the hardware are passed an "input context" that
@@ -618,11 +630,10 @@ struct xhci_virt_device {
 	 * track of input and output contexts separately because
 	 * these commands might fail and we don't trust the hardware.
 	 */
-	struct xhci_device_control	*out_ctx;
-	dma_addr_t			out_ctx_dma;
+	struct xhci_container_ctx       *out_ctx;
 	/* Used for addressing devices and configuration changes */
-	struct xhci_device_control	*in_ctx;
-	dma_addr_t			in_ctx_dma;
+	struct xhci_container_ctx       *in_ctx;
 	/* FIXME when stream support is added */
 	struct xhci_ring		*ep_rings[31];
 	/* Temporary storage in case the configure endpoint command fails and we
@@ -641,7 +652,7 @@ struct xhci_virt_device {
  */
 struct xhci_device_context_array {
 	/* 64-bit device addresses; we only write 32-bit addresses */
-	u32			dev_context_ptrs[2*MAX_HC_SLOTS];
+	u64			dev_context_ptrs[MAX_HC_SLOTS];
 	/* private xHCD pointers */
 	dma_addr_t	dma;
 };
@@ -654,7 +665,7 @@ struct xhci_device_context_array {
 struct xhci_stream_ctx {
 	/* 64-bit stream ring address, cycle state, and stream type */
-	u32	stream_ring[2];
+	u64	stream_ring;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[2];
 };
@@ -662,7 +673,7 @@ struct xhci_stream_ctx {
 struct xhci_transfer_event {
 	/* 64-bit buffer address, or immediate data */
-	u32	buffer[2];
+	u64	buffer;
 	u32	transfer_len;
 	/* This field is interpreted differently based on the type of TRB */
 	u32	flags;
@@ -744,7 +755,7 @@ struct xhci_transfer_event {
 struct xhci_link_trb {
 	/* 64-bit segment pointer*/
-	u32 segment_ptr[2];
+	u64 segment_ptr;
 	u32 intr_target;
 	u32 control;
 };
@@ -755,7 +766,7 @@ struct xhci_link_trb {
 /* Command completion event TRB */
 struct xhci_event_cmd {
 	/* Pointer to command TRB, or the value passed by the event data trb */
-	u32 cmd_trb[2];
+	u64 cmd_trb;
 	u32 status;
 	u32 flags;
 };
@@ -848,8 +859,8 @@ union xhci_trb {
 #define TRB_CONFIG_EP		12
 /* Evaluate Context Command */
 #define TRB_EVAL_CONTEXT	13
-/* Reset Transfer Ring Command */
-#define TRB_RESET_RING		14
+/* Reset Endpoint Command */
+#define TRB_RESET_EP		14
 /* Stop Transfer Ring Command */
 #define TRB_STOP_RING		15
 /* Set Transfer Ring Dequeue Pointer Command */
@@ -929,6 +940,7 @@ struct xhci_ring {
 	unsigned int		cancels_pending;
 	unsigned int		state;
 #define SET_DEQ_PENDING		(1 << 0)
+#define EP_HALTED		(1 << 1)
 	/* The TRB that was last reported in a stopped endpoint ring */
 	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
@@ -940,9 +952,15 @@ struct xhci_ring {
 	u32			cycle_state;
 };
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
 struct xhci_erst_entry {
 	/* 64-bit event ring segment address */
-	u32	seg_addr[2];
+	u64	seg_addr;
 	u32	seg_size;
 	/* Set to zero */
 	u32	rsvd;
@@ -957,6 +975,13 @@ struct xhci_erst {
 	unsigned int		erst_size;
 };
struct xhci_scratchpad {
u64 *sp_array;
dma_addr_t sp_dma;
void **sp_buffers;
dma_addr_t *sp_dma_buffers;
};
 /*
  * Each segment table entry is 4*32bits long.  1K seems like an ok size:
  * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1011,6 +1036,9 @@ struct xhci_hcd {
 	struct xhci_ring	*cmd_ring;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+	/* Scratchpad */
+	struct xhci_scratchpad  *scratchpad;
 	/* slot enabling and address device helpers */
 	struct completion	addr_dev;
 	int slot_id;
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
		const unsigned int val, __u32 __iomem *regs)
 {
-	if (!in_interrupt())
-		xhci_dbg(xhci,
-			"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-			regs, val);
+	xhci_dbg(xhci,
+		"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+		regs, val);
 	writel(val, regs);
 }
/*
* Registers should always be accessed with double word or quad word accesses.
*
* Some xHCI implementations may support 64-bit address pointers. Registers
* with 64-bit address pointers should be written to with dword accesses by
* writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
* xHCI implementations that do not support 64-bit address pointers will ignore
* the high dword, and write order is irrelevant.
*/
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
__u64 __iomem *regs)
{
__u32 __iomem *ptr = (__u32 __iomem *) regs;
u64 val_lo = readl(ptr);
u64 val_hi = readl(ptr + 1);
return val_lo + (val_hi << 32);
}
static inline void xhci_write_64(struct xhci_hcd *xhci,
const u64 val, __u64 __iomem *regs)
{
__u32 __iomem *ptr = (__u32 __iomem *) regs;
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
xhci_dbg(xhci,
"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
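The two helpers added above split every 64-bit register access into two 32-bit MMIO operations, low dword first, as the comment block requires. A minimal stand-alone sketch of the same split (plain user-space C; fake_reg, write_64_low_first and read_64 are made-up names, and an ordinary array stands in for the memory-mapped register pair):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 64-bit xHCI register: two consecutive 32-bit dwords. */
static uint32_t fake_reg[2];

/* Write the low dword first, then the high dword, as the rule above requires. */
static void write_64_low_first(uint64_t val, volatile uint32_t *reg)
{
	reg[0] = (uint32_t)(val & 0xffffffffu);  /* low dword  */
	reg[1] = (uint32_t)(val >> 32);          /* high dword */
}

/* Read both dwords and recombine them into one 64-bit value. */
static uint64_t read_64(const volatile uint32_t *reg)
{
	uint64_t lo = reg[0];
	uint64_t hi = reg[1];
	return lo | (hi << 32);
}

int main(void)
{
	write_64_low_first(0x123456789abcdef0ULL, fake_reg);
	printf("read back: 0x%llx\n", (unsigned long long)read_64(fake_reg));
	return 0;
}
```

Controllers that only implement 32-bit pointers ignore the high dword, so the ordering rule only matters on true 64-bit implementations.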
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
/* xHCI memory managment */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+unsigned int slot_id, unsigned int ep_index,
+struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+struct xhci_ring *ep_ring, unsigned int slot_id,
+unsigned int ep_index, struct xhci_dequeue_state *deq_state);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
#endif /* __LINUX_XHCI_HCD_H */
@@ -220,7 +220,7 @@ config USB_IOWARRIOR
config USB_TEST
tristate "USB testing driver"
-depends on USB && USB_DEVICEFS
+depends on USB
help
This driver is for testing host controller software. It is used
with specialized device firmware for regression and stress testing,
......
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
int i;
/* log core options (read using indexed model) */
-musb_ep_select(mbase, 0);
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
@@ -1990,7 +1989,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail2;
-#ifdef CONFIG_USB_OTG
+#ifdef CONFIG_USB_MUSB_OTG
setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
#endif
......
@@ -407,7 +407,7 @@ __acquires(musb->lock)
csr |= MUSB_RXCSR_P_SENDSTALL
| MUSB_RXCSR_FLUSHFIFO
| MUSB_RXCSR_CLRDATATOG
-| MUSB_TXCSR_P_WZC_BITS;
+| MUSB_RXCSR_P_WZC_BITS;
musb_writew(regs, MUSB_RXCSR,
csr);
}
......
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
static inline u8 musb_read_configdata(void __iomem *mbase)
{
+musb_writeb(mbase, MUSB_INDEX, 0);
return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
}
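CONFIGDATA lives in the indexed MUSB register space: what a read from offset 0x10 returns depends on the value last written to the INDEX register, so the added musb_writeb() makes musb_read_configdata() select index 0 itself instead of trusting the caller (the musb_ep_select() call dropped from musb_core_init() above used to do this). A stand-alone model of indexed register access, with banked_regs, index_reg and read_indexed as purely illustrative names, shows why the select has to come first:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical banked register file: the byte that is readable depends on the index. */
static uint8_t banked_regs[16] = { 0x55, 0xAA };
static uint8_t index_reg = 1;   /* whatever value the previous caller left behind */

static uint8_t read_indexed(void)
{
	return banked_regs[index_reg];
}

int main(void)
{
	printf("without selecting: 0x%02x\n", read_indexed()); /* wrong bank: 0xAA */
	index_reg = 0;                                         /* select bank 0 first */
	printf("after selecting 0: 0x%02x\n", read_indexed()); /* intended value: 0x55 */
	return 0;
}
```

With the select folded into the accessor, callers no longer need to remember it, which is why the separate musb_ep_select() call above could be removed.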
......
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
+{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
......
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
......
@@ -946,6 +946,13 @@
#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
+#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
/*
 * BmRequestType: 1100 0000b
 * bRequest: FTDI_E2_READ
......
@@ -124,10 +124,13 @@
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-/* This driver also supports the ATEN UC2324 device since it is mos7840 based
- * - if I knew the device id it would also support the ATEN UC2322 */
+/* This driver also supports
+ * ATEN UC2324 device using Moschip MCS7840
+ * ATEN UC2322 device using Moschip MCS7820
+ */
#define USB_VENDOR_ID_ATENINTL 0x0557
#define ATENINTL_DEVICE_ID_UC2324 0x2011
+#define ATENINTL_DEVICE_ID_UC2322 0x7820
/* Interrupt Routine Defines */
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
};
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
};
......
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file);
static int option_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
static int option_send_setup(struct usb_serial_port *port);
+#ifdef CONFIG_PM
static int option_suspend(struct usb_serial *serial, pm_message_t message);
static int option_resume(struct usb_serial *serial);
+#endif
/* Vendor and product IDs */
#define OPTION_VENDOR_ID 0x0AF0
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial);
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
#define NOVATELWIRELESS_PRODUCT_U727 0x5010
+#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial);
#define AXESSTEL_VENDOR_ID 0x1726
#define AXESSTEL_PRODUCT_MV110H 0x1000
-#define ONDA_VENDOR_ID 0x19d2
-#define ONDA_PRODUCT_MSA501HS 0x0001
-#define ONDA_PRODUCT_ET502HS 0x0002
-#define ONDA_PRODUCT_MT503HS 0x2000
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial);
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
+#define ZTE_PRODUCT_AC8710 0xfff1
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial);
#define ALINK_VENDOR_ID 0x1e0e
#define ALINK_PRODUCT_3GU 0x9200
+/* ALCATEL PRODUCTS */
+#define ALCATEL_VENDOR_ID 0x1bbb
+#define ALCATEL_PRODUCT_X060S 0x0000
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
-{ USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
-{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
{ USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
-{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
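The ZTE entries switch from USB_DEVICE() to USB_DEVICE_AND_INTERFACE_INFO(), so a match now also requires the interface class/subclass/protocol triple 0xff/0xff/0xff (vendor specific) rather than the vendor and product IDs alone; that keeps the serial driver off interfaces such as the modems' built-in storage. A rough stand-alone model of that interface filter, using simplified structures rather than the kernel's usb_device_id matching, with matches_vendor_specific as an invented helper:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct intf_desc {
	uint8_t bInterfaceClass, bInterfaceSubClass, bInterfaceProtocol;
};

/* Accept only vendor-specific (0xff/0xff/0xff) interfaces, as the new entries do. */
static bool matches_vendor_specific(struct intf_desc i)
{
	return i.bInterfaceClass == 0xff &&
	       i.bInterfaceSubClass == 0xff &&
	       i.bInterfaceProtocol == 0xff;
}

int main(void)
{
	struct intf_desc serial  = { 0xff, 0xff, 0xff };  /* modem/serial function */
	struct intf_desc storage = { 0x08, 0x06, 0x50 };  /* mass-storage function */

	printf("serial interface matched:  %d\n", matches_vendor_specific(serial));
	printf("storage interface matched: %d\n", matches_vendor_specific(storage));
	return 0;
}
```

With the old USB_DEVICE() entries, every interface of a matching device was fair game for the driver.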
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = {
.name = "option",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
+#ifdef CONFIG_PM
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
+#endif
.id_table = option_ids,
.no_dynamic_id = 1,
};
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
.disconnect = option_disconnect,
.release = option_release,
.read_int_callback = option_instat_callback,
+#ifdef CONFIG_PM
.suspend = option_suspend,
.resume = option_resume,
+#endif
};
static int debug;
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
int status = urb->status;
struct usb_serial_port *port = urb->context;
struct option_port_private *portdata = usb_get_serial_port_data(port);
-struct usb_serial *serial = port->serial;
dbg("%s", __func__);
dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp)
{
struct option_port_private *portdata;
-struct usb_serial *serial = port->serial;
int i, err;
struct urb *urb;
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
}
}
+#ifdef CONFIG_PM
static int option_suspend(struct usb_serial *serial, pm_message_t message)
{
dbg("%s entered", __func__);
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
}
return 0;
}
+#endif
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
......
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
US_BULK_GET_MAX_LUN,
USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE,
-0, us->ifnum, us->iobuf, 1, HZ);
+0, us->ifnum, us->iobuf, 1, 10*HZ);
US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
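The final argument of this control message is a timeout in jiffies; HZ jiffies make up one second, so the change stretches the Get Max LUN timeout from about one second to about ten for devices that are slow to answer. A trivial illustration of the arithmetic, with an HZ value picked only for the example:

```c
#include <stdio.h>

#define HZ 250   /* illustrative only; the real HZ is a kernel configuration choice */

int main(void)
{
	printf("old timeout: %4d jiffies (about 1 second)\n", HZ);
	printf("new timeout: %4d jiffies (about 10 seconds)\n", 10 * HZ);
	return 0;
}
```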
......