Commit dccf4a48 authored by Alan Stern, committed by Greg Kroah-Hartman

[PATCH] UHCI: use one QH per endpoint, not per URB

This patch (as623) changes the uhci-hcd driver to make it use one QH per
device endpoint, instead of a QH per URB as it does now.  Numerous areas
of the code are affected by this.  For example, the distinction between
"queued" URBs and non-"queued" URBs no longer exists; all URBs belong to
a queue and some just happen to be at the queue's head.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 499003e8
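
For orientation, the sketch below models the change in plain, standalone C. It is not driver code and every name in it is illustrative: each endpoint owns exactly one QH, and submitting a URB simply appends it to that QH's queue, so the URB at the head of the queue is the one the controller is currently working on.

/*
 * Standalone model of "one QH per endpoint" (illustrative only,
 * not part of this patch).
 */
#include <stdio.h>

struct urb_model {
	int id;
	struct urb_model *next;		/* next URB queued on the same endpoint */
};

struct qh_model {			/* one per endpoint, as in the patch */
	const char *endpoint;
	struct urb_model *queue_head;	/* URB the hardware is working on */
	struct urb_model **queue_tail;
};

static void qh_init(struct qh_model *qh, const char *ep)
{
	qh->endpoint = ep;
	qh->queue_head = NULL;
	qh->queue_tail = &qh->queue_head;
}

/* Submitting a URB just links it onto its endpoint's single QH. */
static void submit_urb(struct qh_model *qh, struct urb_model *urb)
{
	urb->next = NULL;
	*qh->queue_tail = urb;
	qh->queue_tail = &urb->next;
}

int main(void)
{
	struct qh_model bulk_in;
	struct urb_model u1 = { .id = 1 }, u2 = { .id = 2 };

	qh_init(&bulk_in, "bulk-IN endpoint");
	submit_urb(&bulk_in, &u1);	/* head of the queue */
	submit_urb(&bulk_in, &u2);	/* queued behind u1 on the same QH */

	for (struct urb_model *u = bulk_in.queue_head; u; u = u->next)
		printf("%s: urb %d\n", bulk_in.endpoint, u->id);
	return 0;
}

In this model the old "queued" flag has no meaning left: every URB sits in some QH's queue, and being at the head is just a position in that list, which is exactly the simplification the patch description refers to.
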
@@ -90,13 +90,60 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
return out - buf;
}
static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
{
char *out = buf;
struct urb_priv *urbp;
struct list_head *head, *tmp;
struct uhci_td *td;
int i = 0, checked = 0, prevactive = 0;
int i, nactive, ninactive;
if (len < 200)
return 0;
out += sprintf(out, "urb_priv [%p] ", urbp);
out += sprintf(out, "urb [%p] ", urbp->urb);
out += sprintf(out, "qh [%p] ", urbp->qh);
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
switch (usb_pipetype(urbp->urb->pipe)) {
case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO"); break;
case PIPE_INTERRUPT: out += sprintf(out, "INT"); break;
case PIPE_BULK: out += sprintf(out, "BLK"); break;
case PIPE_CONTROL: out += sprintf(out, "CTL"); break;
}
out += sprintf(out, "%s", (urbp->fsbr ? " FSBR" : ""));
out += sprintf(out, "%s", (urbp->fsbr_timeout ? " FSBR_TO" : ""));
if (urbp->urb->status != -EINPROGRESS)
out += sprintf(out, " Status=%d", urbp->urb->status);
out += sprintf(out, "\n");
i = nactive = ninactive = 0;
list_for_each_entry(td, &urbp->td_list, list) {
if (++i <= 10 || debug > 2) {
out += sprintf(out, "%*s%d: ", space + 2, "", i);
out += uhci_show_td(td, out, len - (out - buf), 0);
} else {
if (td_status(td) & TD_CTRL_ACTIVE)
++nactive;
else
++ninactive;
}
}
if (nactive + ninactive > 0)
out += sprintf(out, "%*s[skipped %d inactive and %d active "
"TDs]\n",
space, "", ninactive, nactive);
return out - buf;
}
static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
{
char *out = buf;
int i, nurbs;
__le32 element = qh_element(qh);
/* Try to make sure there's enough memory */
@@ -118,86 +165,36 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
if (!qh->urbp) {
out += sprintf(out, "%*s urbp == NULL\n", space, "");
goto out;
}
urbp = qh->urbp;
head = &urbp->td_list;
tmp = head->next;
td = list_entry(tmp, struct uhci_td, list);
if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS))
out += sprintf(out, "%*s Element != First TD\n", space, "");
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
out += sprintf(out, "%*s%d: ", space + 2, "", i++);
out += uhci_show_td(td, out, len - (out - buf), 0);
if (i > 10 && !checked && prevactive && tmp != head &&
debug <= 2) {
struct list_head *ntmp = tmp;
struct uhci_td *ntd = td;
int active = 1, ni = i;
checked = 1;
while (ntmp != head && ntmp->next != head && active) {
ntd = list_entry(ntmp, struct uhci_td, list);
ntmp = ntmp->next;
active = td_status(ntd) & TD_CTRL_ACTIVE;
ni++;
}
if (active && ni > i) {
out += sprintf(out, "%*s[skipped %d active TDs]\n", space, "", ni - i);
tmp = ntmp;
td = ntd;
i = ni;
}
if (list_empty(&qh->queue)) {
out += sprintf(out, "%*s queue is empty\n", space, "");
} else {
struct urb_priv *urbp = list_entry(qh->queue.next,
struct urb_priv, node);
struct uhci_td *td = list_entry(urbp->td_list.next,
struct uhci_td, list);
if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS))
out += sprintf(out, "%*s Element != First TD\n",
space, "");
i = nurbs = 0;
list_for_each_entry(urbp, &qh->queue, node) {
if (++i <= 10)
out += uhci_show_urbp(urbp, out,
len - (out - buf), space + 2);
else
++nurbs;
}
prevactive = td_status(td) & TD_CTRL_ACTIVE;
}
if (list_empty(&urbp->queue_list) || urbp->queued)
goto out;
out += sprintf(out, "%*sQueued QHs:\n", -space, "--");
head = &urbp->queue_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *nurbp = list_entry(tmp, struct urb_priv,
queue_list);
tmp = tmp->next;
out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space);
if (nurbs > 0)
out += sprintf(out, "%*s Skipped %d URBs\n",
space, "", nurbs);
}
out:
return out - buf;
}
#define show_frame_num() \
if (!shown) { \
shown = 1; \
out += sprintf(out, "- Frame %d\n", i); \
}
#ifdef CONFIG_PROC_FS
static const char * const qh_names[] = {
"skel_unlink_qh", "skel_iso_qh",
"skel_int128_qh", "skel_int64_qh",
"skel_int32_qh", "skel_int16_qh",
"skel_int8_qh", "skel_int4_qh",
@@ -206,12 +203,6 @@ static const char * const qh_names[] = {
"skel_bulk_qh", "skel_term_qh"
};
#define show_qh_name() \
if (!shown) { \
shown = 1; \
out += sprintf(out, "- %s\n", qh_names[i]); \
}
static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
{
char *out = buf;
@@ -321,139 +312,29 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
return out - buf;
}
static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, char *buf, int len)
{
struct list_head *tmp;
char *out = buf;
int count = 0;
if (len < 200)
return 0;
out += sprintf(out, "urb_priv [%p] ", urbp);
out += sprintf(out, "urb [%p] ", urbp->urb);
out += sprintf(out, "qh [%p] ", urbp->qh);
out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
switch (usb_pipetype(urbp->urb->pipe)) {
case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO "); break;
case PIPE_INTERRUPT: out += sprintf(out, "INT "); break;
case PIPE_BULK: out += sprintf(out, "BLK "); break;
case PIPE_CONTROL: out += sprintf(out, "CTL "); break;
}
out += sprintf(out, "%s", (urbp->fsbr ? "FSBR " : ""));
out += sprintf(out, "%s", (urbp->fsbr_timeout ? "FSBR_TO " : ""));
if (urbp->urb->status != -EINPROGRESS)
out += sprintf(out, "Status=%d ", urbp->urb->status);
//out += sprintf(out, "FSBRtime=%lx ",urbp->fsbrtime);
count = 0;
list_for_each(tmp, &urbp->td_list)
count++;
out += sprintf(out, "TDs=%d ",count);
if (urbp->queued)
out += sprintf(out, "queued\n");
else {
count = 0;
list_for_each(tmp, &urbp->queue_list)
count++;
out += sprintf(out, "queued URBs=%d\n", count);
}
return out - buf;
}
static int uhci_show_lists(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
struct list_head *head, *tmp;
int count;
out += sprintf(out, "Main list URBs:");
if (list_empty(&uhci->urb_list))
out += sprintf(out, " Empty\n");
else {
out += sprintf(out, "\n");
count = 0;
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
out += sprintf(out, " %d: ", ++count);
out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
tmp = tmp->next;
}
}
out += sprintf(out, "Remove list URBs:");
if (list_empty(&uhci->urb_remove_list))
out += sprintf(out, " Empty\n");
else {
out += sprintf(out, "\n");
count = 0;
head = &uhci->urb_remove_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
out += sprintf(out, " %d: ", ++count);
out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
tmp = tmp->next;
}
}
out += sprintf(out, "Complete list URBs:");
if (list_empty(&uhci->complete_list))
out += sprintf(out, " Empty\n");
else {
out += sprintf(out, "\n");
count = 0;
head = &uhci->complete_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
out += sprintf(out, " %d: ", ++count);
out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
tmp = tmp->next;
}
}
return out - buf;
}
static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
{
unsigned long flags;
char *out = buf;
int i, j;
struct uhci_qh *qh;
struct uhci_td *td;
struct list_head *tmp, *head;
spin_lock_irqsave(&uhci->lock, flags);
out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
out += sprintf(out, "HC status\n");
out += uhci_show_status(uhci, out, len - (out - buf));
if (debug <= 1)
return out - buf;
out += sprintf(out, "Frame List\n");
for (i = 0; i < UHCI_NUMFRAMES; ++i) {
int shown = 0;
td = uhci->frame_cpu[i];
if (!td)
continue;
if (td->dma_handle != (dma_addr_t)uhci->frame[i]) {
show_frame_num();
out += sprintf(out, "- Frame %d\n", i); \
if (td->dma_handle != (dma_addr_t)uhci->frame[i])
out += sprintf(out, " frame list does not match td->dma_handle!\n");
}
show_frame_num();
head = &td->fl_list;
tmp = head;
@@ -467,14 +348,11 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
out += sprintf(out, "Skeleton QHs\n");
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
int shown = 0;
int cnt = 0;
qh = uhci->skelqh[i];
if (debug > 1) {
show_qh_name();
out += uhci_show_qh(qh, out, len - (out - buf), 4);
}
out += sprintf(out, "- %s\n", qh_names[i]); \
out += uhci_show_qh(qh, out, len - (out - buf), 4);
/* Last QH is the Terminating QH, it's different */
if (i == UHCI_NUM_SKELQH - 1) {
@@ -487,44 +365,27 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
continue;
}
j = (i < 7) ? 7 : i+1; /* Next skeleton */
if (list_empty(&qh->list)) {
if (i < UHCI_NUM_SKELQH - 1) {
if (qh->link !=
(cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH)) {
show_qh_name();
out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n");
}
}
continue;
}
show_qh_name();
head = &qh->list;
j = (i < 9) ? 9 : i+1; /* Next skeleton */
head = &qh->node;
tmp = head->next;
while (tmp != head) {
qh = list_entry(tmp, struct uhci_qh, list);
qh = list_entry(tmp, struct uhci_qh, node);
tmp = tmp->next;
out += uhci_show_qh(qh, out, len - (out - buf), 4);
if (++cnt <= 10)
out += uhci_show_qh(qh, out,
len - (out - buf), 4);
}
if ((cnt -= 10) > 0)
out += sprintf(out, " Skipped %d QHs\n", cnt);
if (i < UHCI_NUM_SKELQH - 1) {
if (i > 1 && i < UHCI_NUM_SKELQH - 1) {
if (qh->link !=
(cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH))
out += sprintf(out, " last QH not linked to next skeleton!\n");
}
}
if (debug > 2)
out += uhci_show_lists(uhci, out, len - (out - buf));
spin_unlock_irqrestore(&uhci->lock, flags);
return out - buf;
}
@@ -541,6 +402,7 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
struct uhci_hcd *uhci = inode->u.generic_ip;
struct uhci_debug *up;
int ret = -ENOMEM;
unsigned long flags;
lock_kernel();
up = kmalloc(sizeof(*up), GFP_KERNEL);
@@ -553,7 +415,9 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
goto out;
}
spin_lock_irqsave(&uhci->lock, flags);
up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
spin_unlock_irqrestore(&uhci->lock, flags);
file->private_data = up;
@@ -54,7 +54,7 @@
/*
* Version Information
*/
#define DRIVER_VERSION "v2.3"
#define DRIVER_VERSION "v3.0"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
Alan Stern"
@@ -489,15 +489,11 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->fsbrtimeout = 0;
spin_lock_init(&uhci->lock);
INIT_LIST_HEAD(&uhci->qh_remove_list);
INIT_LIST_HEAD(&uhci->td_remove_list);
INIT_LIST_HEAD(&uhci->urb_remove_list);
INIT_LIST_HEAD(&uhci->urb_list);
INIT_LIST_HEAD(&uhci->complete_list);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
@@ -540,7 +536,7 @@ static int uhci_start(struct usb_hcd *hcd)
}
for (i = 0; i < UHCI_NUM_SKELQH; i++) {
uhci->skelqh[i] = uhci_alloc_qh(uhci);
uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
if (!uhci->skelqh[i]) {
dev_err(uhci_dev(uhci), "unable to allocate QH\n");
goto err_alloc_skelqh;
@@ -557,13 +553,17 @@ static int uhci_start(struct usb_hcd *hcd)
uhci->skel_int16_qh->link =
uhci->skel_int8_qh->link =
uhci->skel_int4_qh->link =
uhci->skel_int2_qh->link =
cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_int2_qh->link = UHCI_PTR_QH |
cpu_to_le32(uhci->skel_int1_qh->dma_handle);
uhci->skel_int1_qh->link = UHCI_PTR_QH |
cpu_to_le32(uhci->skel_ls_control_qh->dma_handle);
uhci->skel_ls_control_qh->link = UHCI_PTR_QH |
cpu_to_le32(uhci->skel_fs_control_qh->dma_handle);
uhci->skel_fs_control_qh->link = UHCI_PTR_QH |
cpu_to_le32(uhci->skel_bulk_qh->dma_handle);
uhci->skel_bulk_qh->link = UHCI_PTR_QH |
cpu_to_le32(uhci->skel_term_qh->dma_handle);
/* This dummy TD is to work around a bug in Intel PIIX controllers */
uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
@@ -589,15 +589,15 @@ static int uhci_start(struct usb_hcd *hcd)
/*
* ffs (Find First bit Set) does exactly what we need:
* 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6],
* 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
* ffs > 6 => not on any high-period queue, so use
* skel_int1_qh = skelqh[7].
* 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8],
* 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
* ffs >= 7 => not on any high-period queue, so use
* skel_int1_qh = skelqh[9].
* Add UHCI_NUMFRAMES to insure at least one bit is set.
*/
irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
if (irq < 0)
irq = 7;
irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
if (irq <= 1)
irq = 9;
/* Only place we don't use the frame list routines */
uhci->frame[i] = UHCI_PTR_QH |
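
The renumbering of the skeleton QHs (the new unlink and iso skeletons occupy skelqh[0] and skelqh[1], pushing the interrupt skeletons up by two) is what shifts the arithmetic above from "6 - __ffs(...)" to "8 - __ffs(...)". The standalone check below reproduces the new mapping, with __builtin_ctz() standing in for the kernel's __ffs(); it is illustrative only and not part of the patch.

/*
 * Worked check of the new frame -> skeleton mapping (illustrative,
 * not driver code).  skelqh indices follow the renumbered table:
 * int2 = skelqh[8], int4 = skelqh[7], ..., int1 = skelqh[9].
 */
#include <stdio.h>

#define UHCI_NUMFRAMES 1024

static int frame_to_skel(int i)
{
	int irq = 8 - __builtin_ctz((unsigned int)(i + UHCI_NUMFRAMES));

	if (irq <= 1)		/* __ffs >= 7: not on a high-period queue */
		irq = 9;	/* skel_int1_qh */
	return irq;
}

int main(void)
{
	/* frame 1 -> skelqh[8] (int2), frame 2 -> skelqh[7] (int4),
	 * frame 0 -> skelqh[9] (int1), matching the comment in this hunk. */
	printf("frame 1 -> skelqh[%d]\n", frame_to_skel(1));
	printf("frame 2 -> skelqh[%d]\n", frame_to_skel(2));
	printf("frame 0 -> skelqh[%d]\n", frame_to_skel(0));
	return 0;
}
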
@@ -767,13 +767,30 @@ static int uhci_resume(struct usb_hcd *hcd)
}
#endif
/* Wait until all the URBs for a particular device/endpoint are gone */
/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
struct usb_host_endpoint *hep)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct uhci_qh *qh;
spin_lock_irq(&uhci->lock);
qh = (struct uhci_qh *) hep->hcpriv;
if (qh == NULL)
goto done;
wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
while (qh->state != QH_STATE_IDLE) {
++uhci->num_waiting;
spin_unlock_irq(&uhci->lock);
wait_event_interruptible(uhci->waitqh,
qh->state == QH_STATE_IDLE);
spin_lock_irq(&uhci->lock);
--uhci->num_waiting;
}
uhci_free_qh(uhci, qh);
done:
spin_unlock_irq(&uhci->lock);
}
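
The loop above is the usual "sleep until the object is idle, then free it" pattern: the waiter drops the lock, sleeps on uhci->waitqh until the QH reaches QH_STATE_IDLE, and only then frees it. The userspace sketch below models that pattern with a pthread condition variable standing in for the kernel wait queue; it is illustrative only and not part of this patch (the code that marks the QH idle and wakes the waiter is in the collapsed portion of the diff).

/*
 * Userspace model of the wait-until-idle pattern (illustrative only).
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { STATE_ACTIVE, STATE_IDLE };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;	/* like uhci->waitqh */
static int qh_state = STATE_ACTIVE;

static void *completion_side(void *arg)
{
	(void)arg;
	usleep(10000);				/* pretend the HC finishes its work */
	pthread_mutex_lock(&lock);
	qh_state = STATE_IDLE;
	pthread_cond_broadcast(&waitq);		/* like waking uhci->waitqh */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completion_side, NULL);

	pthread_mutex_lock(&lock);
	while (qh_state != STATE_IDLE)		/* endpoint_disable's wait loop */
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);

	puts("QH idle: safe to free it now");
	pthread_join(t, NULL);
	return 0;
}
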
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
@@ -28,8 +28,9 @@
#define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */
#define USBSTS_ERROR 0x0002 /* Interrupt due to error */
#define USBSTS_RD 0x0004 /* Resume Detect */
#define USBSTS_HSE 0x0008 /* Host System Error - basically PCI problems */
#define USBSTS_HCPE 0x0010 /* Host Controller Process Error - the scripts were buggy */
#define USBSTS_HSE 0x0008 /* Host System Error: PCI problems */
#define USBSTS_HCPE 0x0010 /* Host Controller Process Error:
* the schedule is buggy */
#define USBSTS_HCH 0x0020 /* HC Halted */
/* Interrupt enable register */
@@ -47,7 +48,8 @@
/* USB port status and control registers */
#define USBPORTSC1 16
#define USBPORTSC2 18
#define USBPORTSC_CCS 0x0001 /* Current Connect Status ("device present") */
#define USBPORTSC_CCS 0x0001 /* Current Connect Status
* ("device present") */
#define USBPORTSC_CSC 0x0002 /* Connect Status Change */
#define USBPORTSC_PE 0x0004 /* Port Enable */
#define USBPORTSC_PEC 0x0008 /* Port Enable Change */
@@ -71,15 +73,16 @@
#define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
#define UHCI_PTR_BITS cpu_to_le32(0x000F)
#define UHCI_PTR_TERM cpu_to_le32(0x0001)
#define UHCI_PTR_QH cpu_to_le32(0x0002)
#define UHCI_PTR_DEPTH cpu_to_le32(0x0004)
#define UHCI_PTR_BREADTH cpu_to_le32(0x0000)
#define UHCI_PTR_BITS __constant_cpu_to_le32(0x000F)
#define UHCI_PTR_TERM __constant_cpu_to_le32(0x0001)
#define UHCI_PTR_QH __constant_cpu_to_le32(0x0002)
#define UHCI_PTR_DEPTH __constant_cpu_to_le32(0x0004)
#define UHCI_PTR_BREADTH __constant_cpu_to_le32(0x0000)
#define UHCI_NUMFRAMES 1024 /* in the frame list [array] */
#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
#define CAN_SCHEDULE_FRAMES 1000 /* how far future frames can be scheduled */
#define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames
* can be scheduled */
/*
@@ -87,38 +90,54 @@
*/
/*
* One role of a QH is to hold a queue of TDs for some endpoint. Each QH is
* used with one URB, and qh->element (updated by the HC) is either:
* - the next unprocessed TD for the URB, or
* - UHCI_PTR_TERM (when there's no more traffic for this endpoint), or
* - the QH for the next URB queued to the same endpoint.
* One role of a QH is to hold a queue of TDs for some endpoint. One QH goes
* with each endpoint, and qh->element (updated by the HC) is either:
* - the next unprocessed TD in the endpoint's queue, or
* - UHCI_PTR_TERM (when there's no more traffic for this endpoint).
*
* The other role of a QH is to serve as a "skeleton" framelist entry, so we
* can easily splice a QH for some endpoint into the schedule at the right
* place. Then qh->element is UHCI_PTR_TERM.
*
* In the frame list, qh->link maintains a list of QHs seen by the HC:
* In the schedule, qh->link maintains a list of QHs seen by the HC:
* skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ...
*
* qh->node is the software equivalent of qh->link. The differences
* are that the software list is doubly-linked and QHs in the UNLINKING
* state are on the software list but not the hardware schedule.
*
* For bookkeeping purposes we maintain QHs even for Isochronous endpoints,
* but they never get added to the hardware schedule.
*/
#define QH_STATE_IDLE 1 /* QH is not being used */
#define QH_STATE_UNLINKING 2 /* QH has been removed from the
* schedule but the hardware may
* still be using it */
#define QH_STATE_ACTIVE 3 /* QH is on the schedule */
struct uhci_qh {
/* Hardware fields */
__le32 link; /* Next queue */
__le32 element; /* Queue element pointer */
__le32 link; /* Next QH in the schedule */
__le32 element; /* Queue element (TD) pointer */
/* Software fields */
dma_addr_t dma_handle;
struct urb_priv *urbp;
struct list_head node; /* Node in the list of QHs */
struct usb_host_endpoint *hep; /* Endpoint information */
struct usb_device *udev;
struct list_head queue; /* Queue of urbps for this QH */
struct uhci_qh *skel; /* Skeleton for this QH */
struct list_head list;
struct list_head remove_list;
unsigned int unlink_frame; /* When the QH was unlinked */
int state; /* QH_STATE_xxx; see above */
} __attribute__((aligned(16)));
/*
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
static __le32 inline qh_element(struct uhci_qh *qh) {
static inline __le32 qh_element(struct uhci_qh *qh) {
__le32 element = qh->element;
barrier();
@@ -149,11 +168,13 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
#define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */
#define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
TD_CTRL_BABBLE | TD_CTRL_CRCTIME | TD_CTRL_BITSTUFF)
TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
TD_CTRL_BITSTUFF)
#define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT)
#define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000)
#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */
#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \
TD_CTRL_ACTLEN_MASK) /* 1-based */
/*
* for TD <info>: (a.k.a. Token)
@@ -163,7 +184,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
#define TD_TOKEN_TOGGLE_SHIFT 19
#define TD_TOKEN_TOGGLE (1 << 19)
#define TD_TOKEN_EXPLEN_SHIFT 21
#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n - 1 */
#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n-1 */
#define TD_TOKEN_PID_MASK 0xFF
#define uhci_explen(len) ((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \
@@ -187,7 +208,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
* sw space after the TD entry.
*
* td->link points to either another TD (not necessarily for the same urb or
* even the same endpoint), or nothing (PTR_TERM), or a QH (for queued urbs).
* even the same endpoint), or nothing (PTR_TERM), or a QH.
*/
struct uhci_td {
/* Hardware fields */
@@ -210,7 +231,7 @@ struct uhci_td {
* We need a special accessor for the control/status word because it is
* subject to asynchronous updates by the controller.
*/
static u32 inline td_status(struct uhci_td *td) {
static inline u32 td_status(struct uhci_td *td) {
__le32 status = td->status;
barrier();
@@ -223,17 +244,14 @@ static u32 inline td_status(struct uhci_td *td) {
*/
/*
* The UHCI driver places Interrupt, Control and Bulk into QHs both
* to group together TDs for one transfer, and also to facilitate queuing
* of URBs. To make it easy to insert entries into the schedule, we have
* a skeleton of QHs for each predefined Interrupt latency, low-speed
* control, full-speed control and terminating QH (see explanation for
* the terminating QH below).
* The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
* automatic queuing. To make it easy to insert entries into the schedule,
* we have a skeleton of QHs for each predefined Interrupt latency,
* low-speed control, full-speed control, bulk, and terminating QH
* (see explanation for the terminating QH below).
*
* When we want to add a new QH, we add it to the end of the list for the
* skeleton QH.
*
* For instance, the queue can look like this:
* skeleton QH. For instance, the schedule list can look like this:
*
* skel int128 QH
* dev 1 interrupt QH
@@ -256,26 +274,31 @@ static u32 inline td_status(struct uhci_td *td) {
* - To loop back to the full-speed control queue for full-speed bandwidth
* reclamation.
*
* Isochronous transfers are stored before the start of the skeleton
* schedule and don't use QHs. While the UHCI spec doesn't forbid the
* use of QHs for Isochronous, it doesn't use them either. And the spec
* says that queues never advance on an error completion status, which
* makes them totally unsuitable for Isochronous transfers.
* There's a special skeleton QH for Isochronous QHs. It never appears
on the schedule, and Isochronous TDs go on the schedule before the
skeleton QHs. The hardware accesses them directly rather than
* through their QH, which is used only for bookkeeping purposes.
* While the UHCI spec doesn't forbid the use of QHs for Isochronous,
* it doesn't use them either. And the spec says that queues never
* advance on an error completion status, which makes them totally
* unsuitable for Isochronous transfers.
*/
#define UHCI_NUM_SKELQH 12
#define skel_int128_qh skelqh[0]
#define skel_int64_qh skelqh[1]
#define skel_int32_qh skelqh[2]
#define skel_int16_qh skelqh[3]
#define skel_int8_qh skelqh[4]
#define skel_int4_qh skelqh[5]
#define skel_int2_qh skelqh[6]
#define skel_int1_qh skelqh[7]
#define skel_ls_control_qh skelqh[8]
#define skel_fs_control_qh skelqh[9]
#define skel_bulk_qh skelqh[10]
#define skel_term_qh skelqh[11]
#define UHCI_NUM_SKELQH 14
#define skel_unlink_qh skelqh[0]
#define skel_iso_qh skelqh[1]
#define skel_int128_qh skelqh[2]
#define skel_int64_qh skelqh[3]
#define skel_int32_qh skelqh[4]
#define skel_int16_qh skelqh[5]
#define skel_int8_qh skelqh[6]
#define skel_int4_qh skelqh[7]
#define skel_int2_qh skelqh[8]
#define skel_int1_qh skelqh[9]
#define skel_ls_control_qh skelqh[10]
#define skel_fs_control_qh skelqh[11]
#define skel_bulk_qh skelqh[12]
#define skel_term_qh skelqh[13]
/*
* Search tree for determining where <interval> fits in the skelqh[]
@@ -293,21 +316,21 @@ static inline int __interval_to_skel(int interval)
if (interval < 16) {
if (interval < 4) {
if (interval < 2)
return 7; /* int1 for 0-1 ms */
return 6; /* int2 for 2-3 ms */
return 9; /* int1 for 0-1 ms */
return 8; /* int2 for 2-3 ms */
}
if (interval < 8)
return 5; /* int4 for 4-7 ms */
return 4; /* int8 for 8-15 ms */
return 7; /* int4 for 4-7 ms */
return 6; /* int8 for 8-15 ms */
}
if (interval < 64) {
if (interval < 32)
return 3; /* int16 for 16-31 ms */
return 2; /* int32 for 32-63 ms */
return 5; /* int16 for 16-31 ms */
return 4; /* int32 for 32-63 ms */
}
if (interval < 128)
return 1; /* int64 for 64-127 ms */
return 0; /* int128 for 128-255 ms (Max.) */
return 3; /* int64 for 64-127 ms */
return 2; /* int128 for 128-255 ms (Max.) */
}
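
The same two-slot shift shows up in __interval_to_skel(): every return value moves up by two to account for the new unlink and iso skeletons at the front of skelqh[]. A standalone check of the new mapping against the #define table above (illustrative only, not part of the patch):

/*
 * Quick check of the renumbered interval -> skelqh[] mapping
 * (illustrative, not driver code).
 */
#include <stdio.h>

static int interval_to_skel(int interval)	/* new return values from this hunk */
{
	if (interval < 16) {
		if (interval < 4)
			return interval < 2 ? 9 : 8;	/* int1, int2 */
		return interval < 8 ? 7 : 6;		/* int4, int8 */
	}
	if (interval < 64)
		return interval < 32 ? 5 : 4;		/* int16, int32 */
	return interval < 128 ? 3 : 2;			/* int64, int128 */
}

int main(void)
{
	static const char *const names[] = {
		"unlink", "iso", "int128", "int64", "int32", "int16",
		"int8", "int4", "int2", "int1", "ls_control",
		"fs_control", "bulk", "term"
	};
	static const int samples[] = { 1, 3, 10, 40, 255 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
		int skel = interval_to_skel(samples[i]);

		printf("interval %3d ms -> skelqh[%2d] (skel_%s_qh)\n",
		       samples[i], skel, names[skel]);
	}
	return 0;
}
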
@@ -363,12 +386,12 @@ struct uhci_hcd {
spinlock_t lock;
dma_addr_t frame_dma_handle; /* Hardware frame list */
dma_addr_t frame_dma_handle; /* Hardware frame list */
__le32 *frame;
void **frame_cpu; /* CPU's frame list */
void **frame_cpu; /* CPU's frame list */
int fsbr; /* Full-speed bandwidth reclamation */
unsigned long fsbrtimeout; /* FSBR delay */
int fsbr; /* Full-speed bandwidth reclamation */
unsigned long fsbrtimeout; /* FSBR delay */
enum uhci_rh_state rh_state;
unsigned long auto_stop_time; /* When to AUTO_STOP */
@@ -392,24 +415,19 @@ struct uhci_hcd {
/* Main list of URBs currently controlled by this HC */
struct list_head urb_list;
/* List of QHs that are done, but waiting to be unlinked (race) */
struct list_head qh_remove_list;
unsigned int qh_remove_age; /* Age in frames */
/* List of TDs that are done, but waiting to be freed (race) */
struct list_head td_remove_list;
unsigned int td_remove_age; /* Age in frames */
/* List of asynchronously unlinked URBs */
struct list_head urb_remove_list;
unsigned int urb_remove_age; /* Age in frames */
/* List of URBs awaiting completion callback */
struct list_head complete_list;
struct list_head idle_qh_list; /* Where the idle QHs live */
int rh_numports; /* Number of root-hub ports */
wait_queue_head_t waitqh; /* endpoint_disable waiters */
int num_waiting; /* Number of waiters */
};
/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
@@ -430,22 +448,19 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
*/
struct urb_priv {
struct list_head urb_list;
struct list_head node; /* Node in the QH's urbp list */
struct urb *urb;
struct uhci_qh *qh; /* QH for this URB */
struct list_head td_list;
unsigned fsbr : 1; /* URB turned on FSBR */
unsigned fsbr_timeout : 1; /* URB timed out on FSBR */
unsigned queued : 1; /* QH was queued (not linked in) */
unsigned short_control_packet : 1; /* If we get a short packet during */
/* a control transfer, retrigger */
/* the status phase */
unsigned long fsbrtime; /* In jiffies */
struct list_head queue_list;
unsigned fsbr : 1; /* URB turned on FSBR */
unsigned fsbr_timeout : 1; /* URB timed out on FSBR */
unsigned short_transfer : 1; /* URB got a short transfer, no
* need to rescan */
};
The remaining file diff has been collapsed and is not shown.