commit 74f9fe21
Author:    Sarah Sharp
Committer: Greg Kroah-Hartman

USB: xhci: Make reverting an alt setting "unfailable".

When a driver wants to switch to a different alternate setting for an
interface, the USB core will (soon) check whether there is enough
bandwidth.  Once the new alternate setting is installed in the xHCI
hardware, the USB core will send a USB_REQ_SET_INTERFACE control
message.  That can fail in various ways, and the USB core needs to be
able to reinstate the old alternate setting.

With the old code, reinstating the old alt setting could fail if
there's not enough memory to allocate new endpoint rings.  Keep
around a cache of (at most 31) endpoint rings for this case.  When we
successfully switch the xHCI hardware to the new alt setting, the old
alt setting's rings will be stored in the cache.  Therefore we'll
always have enough rings to satisfy a conversion back to a previous
device setting.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent: 3342ecda
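The message describes a simple discipline: when the hardware accepts a new
setting, the old setting's rings are pushed into a bounded per-device cache,
and when a later ring allocation fails, a cached ring is popped and reused.
Below is a minimal, self-contained C sketch of that push/pop discipline,
assuming a plain bounded stack; `ring_cache`, `cache_or_free`, and
`alloc_or_reuse` are illustrative names, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

#define MAX_CACHED 31	/* mirrors XHCI_MAX_RINGS_CACHED */

struct ring { int id; };	/* stand-in for struct xhci_ring */

/* Bounded stack of retired rings (hypothetical, not driver code). */
struct ring_cache {
	struct ring *slot[MAX_CACHED];
	int count;
};

/* After a successful switch, retire the old setting's ring into the
 * cache, or free it when the cache is already full. */
static void cache_or_free(struct ring_cache *c, struct ring *old)
{
	if (c->count < MAX_CACHED)
		c->slot[c->count++] = old;
	else
		free(old);
}

/* When building a new (or reverted) setting, try a fresh allocation
 * first and fall back to the cache; once a ring has been retired,
 * a revert can always be satisfied. */
static struct ring *alloc_or_reuse(struct ring_cache *c)
{
	struct ring *r = malloc(sizeof(*r));

	if (r)
		return r;		/* normal path */
	if (c->count == 0)
		return NULL;		/* only a first-time add can fail */
	return c->slot[--c->count];	/* reuse a retired ring */
}

int main(void)
{
	struct ring_cache cache = { .count = 0 };
	struct ring *old_ring = malloc(sizeof(*old_ring));
	struct ring *new_ring = alloc_or_reuse(&cache);

	cache_or_free(&cache, old_ring);	/* switch succeeded */
	printf("%d ring(s) cached\n", cache.count);

	/* Revert: even if malloc failed here, the cache has a ring. */
	free(alloc_or_reuse(&cache));
	free(new_ring);
	return 0;
}

Because the cache only fills on a successful switch, the fallback path can
only be reached after an earlier setting's rings were retired, which is
exactly the revert case the commit wants to make unfailable.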
@@ -1262,14 +1262,36 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 	xhci_zero_in_ctx(xhci, virt_dev);
-	/* Free any old rings */
+	/* Install new rings and free or cache any old rings */
 	for (i = 1; i < 31; ++i) {
-		if (virt_dev->eps[i].new_ring) {
-			xhci_ring_free(xhci, virt_dev->eps[i].ring);
-			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
-			virt_dev->eps[i].new_ring = NULL;
-		}
+		int rings_cached;
+
+		if (!virt_dev->eps[i].new_ring)
+			continue;
+		/* Only cache or free the old ring if it exists.
+		 * It may not if this is the first add of an endpoint.
+		 */
+		if (virt_dev->eps[i].ring) {
+			rings_cached = virt_dev->num_rings_cached;
+			if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+				virt_dev->num_rings_cached++;
+				rings_cached = virt_dev->num_rings_cached;
+				virt_dev->ring_cache[rings_cached] =
+					virt_dev->eps[i].ring;
+				xhci_dbg(xhci, "Cached old ring, "
+						"%d ring%s cached\n",
+						rings_cached,
+						(rings_cached > 1) ? "s" : "");
+			} else {
+				xhci_ring_free(xhci, virt_dev->eps[i].ring);
+				xhci_dbg(xhci, "Ring cache full (%d rings), "
+						"freeing ring\n",
+						virt_dev->num_rings_cached);
+			}
+		}
+		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+		virt_dev->eps[i].new_ring = NULL;
 	}
 
 	return ret;
 }
...
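A ring pulled back out of the cache still holds the retired setting's TRBs,
so the hunks that follow zero every segment, re-create the link TRBs, and
reset the enqueue/dequeue pointers and cycle state before reuse. The cycle
bit is the reason cycle_state must return to 1 after zeroing: producer and
consumer agree on TRB ownership by comparing that bit. A toy model of the
handshake, assuming a single-segment ring; `toy_ring` and its helpers are
hypothetical, not driver code.

#include <stdio.h>
#include <string.h>

#define TRBS_PER_SEG 8

/* Toy TRB: only the cycle bit matters for this illustration. */
struct toy_trb {
	unsigned int cycle;
};

/* Toy single-segment ring; not the driver's real layout. */
struct toy_ring {
	struct toy_trb trbs[TRBS_PER_SEG];
	int enqueue;
	int dequeue;
	unsigned int cycle_state;
};

/* Re-initialize a recycled ring: zero every TRB and reset the software
 * state, analogous to what xhci_reinit_cached_ring() and
 * xhci_initialize_ring_info() do for real rings below. */
static void toy_reinit(struct toy_ring *r)
{
	memset(r->trbs, 0, sizeof(r->trbs));	/* all cycle bits -> 0 */
	r->enqueue = 0;
	r->dequeue = 0;
	r->cycle_state = 1;	/* PCS = CCS = 1 over the zeroed ring */
}

int main(void)
{
	struct toy_ring r;

	toy_reinit(&r);

	/* Producer hands a TRB to the consumer by writing the cycle bit. */
	r.trbs[r.enqueue].cycle = r.cycle_state;
	r.enqueue++;

	/* Consumer owns a TRB only when its cycle bit matches CCS. */
	printf("TRB 0 owned by consumer: %s\n",
	       r.trbs[r.dequeue].cycle == r.cycle_state ? "yes" : "no");
	printf("TRB 1 owned by consumer: %s\n",
	       r.trbs[r.dequeue + 1].cycle == r.cycle_state ? "yes" : "no");
	return 0;
}

After the memset, the stale cycle bits from the previous setting are gone,
so resetting cycle_state to 1 restores the same producer/consumer agreement
a freshly allocated ring has; the link TRBs destroyed by the zeroing are
rebuilt by the xhci_link_segments() call in the hunk below.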
@@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	kfree(ring);
 }
 
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+	/* The ring is empty, so the enqueue pointer == dequeue pointer */
+	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
+	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
+	/* The ring is initialized to 0. The producer must write 1 to the cycle
+	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
+	 */
+	ring->cycle_state = 1;
+	/* Not necessary for new rings, but needed for re-initialized rings */
+	ring->enq_updates = 0;
+	ring->deq_updates = 0;
+}
+
 /**
  * Create a new ring with zero or more segments.
  *
@@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
 	}
-	/* The ring is empty, so the enqueue pointer == dequeue pointer */
-	ring->enqueue = ring->first_seg->trbs;
-	ring->enq_seg = ring->first_seg;
-	ring->dequeue = ring->enqueue;
-	ring->deq_seg = ring->first_seg;
-	/* The ring is initialized to 0. The producer must write 1 to the cycle
-	 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
-	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
-	 */
-	ring->cycle_state = 1;
-
+	xhci_initialize_ring_info(ring);
 	return ring;
 
 fail:
@@ -191,6 +198,27 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	return 0;
 }
 
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+		struct xhci_ring *ring)
+{
+	struct xhci_segment *seg = ring->first_seg;
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		/* All endpoint rings have link TRBs */
+		xhci_link_segments(xhci, seg, seg->next, 1);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	/* td list should be empty since all URBs have been cancelled,
+	 * but just in case...
+	 */
+	INIT_LIST_HEAD(&ring->td_list);
+}
+
 #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
 
 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -276,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 
+	if (dev->ring_cache) {
+		for (i = 0; i < dev->num_rings_cached; i++)
+			xhci_ring_free(xhci, dev->ring_cache[i]);
+		kfree(dev->ring_cache);
+	}
+
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -329,6 +363,14 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->eps[0].ring)
 		goto fail;
 
+	/* Allocate pointers to the ring cache */
+	dev->ring_cache = kzalloc(
+			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+			flags);
+	if (!dev->ring_cache)
+		goto fail;
+	dev->num_rings_cached = 0;
+
 	init_completion(&dev->cmd_completion);
 	INIT_LIST_HEAD(&dev->cmd_list);
@@ -555,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 1, true, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring)
-		return -ENOMEM;
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		virt_dev->num_rings_cached--;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+	}
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
...
@@ -677,6 +677,10 @@ struct xhci_virt_device {
 	struct xhci_container_ctx	*out_ctx;
 	/* Used for addressing devices and configuration changes */
 	struct xhci_container_ctx	*in_ctx;
+	/* Rings saved to ensure old alt settings can be re-instated */
+	struct xhci_ring		**ring_cache;
+	int				num_rings_cached;
+#define	XHCI_MAX_RINGS_CACHED	31
 	struct xhci_virt_ep		eps[31];
 	struct completion		cmd_completion;
 	/* Status of the last command issued for this device */
...