Commit 90221170 authored by Greg Kroah-Hartman

Merge tag 'for-usb-next-2012-03-13' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

Hi Greg,

Here's my final pull request for 3.4.  All the patches have been under
review for some time (months in some cases).  The ring expansion patches
in particular have been tested by both me and Paul Zimmerman from
Synopsys.

They add support for:
 - Dynamic ring expansion
 - New USB 2.1 link PM errata (BESL)
 - xHCI host controller support for the Synopsys DesignWare 3 IP

The dynamic ring expansion patches finally make test 10 of the host-side
test suite pass, instead of failing due to no room on the endpoint ring for
the larger transfers.  I would have hoped that the ring expansion
patchset would make the Point Grey USB 3.0 camera work, but sadly it
fails to respond to a control transfer on my test system.  This doesn't
seem to be a driver bug, but it could be a device or host bug.

Felipe has tested the patches to add a platform device to the xHCI
driver on the Synopsys DesignWare 3 IP in the TI OMAP5 board.

Please pull.

Thanks,
Sarah Sharp
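
For a sense of the arithmetic behind the ring expansion work: every ring
segment ends in a link TRB, so each segment contributes TRBS_PER_SEGMENT - 1
usable TRBs, and the number of segments needed for a transfer is a ceiling
division. A minimal standalone sketch of the math (this mirrors the new
xhci_ring_expansion() below, but is an illustration, not the kernel code;
TRBS_PER_SEGMENT is 64 in this driver):

#include <stdio.h>

#define TRBS_PER_SEGMENT 64

static unsigned int segs_needed(unsigned int num_trbs)
{
	/* Each segment supplies TRBS_PER_SEGMENT - 1 usable TRBs;
	 * round the request up by that divisor. */
	return (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			(TRBS_PER_SEGMENT - 1);
}

int main(void)
{
	/* 63 TRBs fit in one segment; 64 spill into a second. */
	printf("%u %u %u\n", segs_needed(63), segs_needed(64),
			segs_needed(127));	/* prints: 1 2 3 */
	return 0;
}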
@@ -27,6 +27,10 @@ config USB_XHCI_HCD
To compile this driver as a module, choose M here: the
module will be called xhci-hcd.
config USB_XHCI_PLATFORM
tristate
depends on USB_XHCI_HCD
config USB_XHCI_HCD_DEBUGGING
bool "Debugging for the xHCI host controller"
depends on USB_XHCI_HCD
@@ -15,6 +15,10 @@ xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
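# xhci-plat.o is linked into xhci-hcd whenever CONFIG_USB_XHCI_PLATFORM is
# enabled; ifneq against the empty string catches both y and m.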
ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
xhci-hcd-y += xhci-plat.o
endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_PCI) += pci-quirks.o
@@ -34,10 +34,12 @@
* Section 4.11.1.1:
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state, gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
int i;
seg = kzalloc(sizeof *seg, flags);
if (!seg)
@@ -50,6 +52,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
}
memset(seg->trbs, 0, SEGMENT_SIZE);
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
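/* (A producer marks a TRB as valid by writing a cycle bit equal to its
* cycle state, so with a cycle state of 0 a TRB whose cycle bit is set
* to 1 reads as software-owned and will not be consumed.) */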
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= TRB_CYCLE;
}
seg->dma = dma;
seg->next = NULL;
@@ -65,6 +72,20 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
kfree(seg);
}
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *first)
{
struct xhci_segment *seg;
seg = first->next;
while (seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
xhci_segment_free(xhci, first);
}
/*
* Make the prev segment point to the next segment.
*
@@ -73,14 +94,14 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
struct xhci_segment *next, enum xhci_ring_type type)
{
u32 val;
if (!prev || !next)
return;
prev->next = next;
if (type != TYPE_EVENT) {
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
cpu_to_le64(next->dma);
@@ -91,35 +112,55 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
/* Always set the chain bit with 0.95 hardware */
/* Set chain bit for isoc rings on AMD 0.96 host */
if (xhci_link_trb_quirk(xhci) ||
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
struct xhci_segment *next;
if (!ring || !first || !last)
return;
next = ring->enq_seg->next;
xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
xhci_link_segments(xhci, last, next, ring->type);
ring->num_segs += num_segs;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
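/* If the new segments were linked in behind the old last segment, the
* ring's end has moved: transfer the Link Toggle bit from the old last
* segment to the new one (sections 4.9.2.1, 6.4.4.1). */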
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT-1].link.control
|= cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_segment *seg;
struct xhci_segment *first_seg;
if (!ring)
return;
if (ring->first_seg)
xhci_free_segments_for_ring(xhci, ring->first_seg);
kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
@@ -129,11 +170,53 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
/* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to handover ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
*
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = cycle_state;
/* Not necessary for new rings, but needed for re-initialized rings */
ring->enq_updates = 0;
ring->deq_updates = 0;
/*
* Each segment has a link TRB, and we leave one extra TRB for SW
* accounting purposes
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
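/* e.g. a one-segment ring with TRBS_PER_SEGMENT == 64 leaves
* 1 * 63 - 1 = 62 TRBs available to the producer */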
}
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_segment *prev;
prev = xhci_segment_alloc(xhci, cycle_state, flags);
if (!prev)
return -ENOMEM;
num_segs--;
*first = prev;
while (num_segs > 0) {
struct xhci_segment *next;
next = xhci_segment_alloc(xhci, cycle_state, flags);
if (!next) {
xhci_free_segments_for_ring(xhci, *first);
return -ENOMEM;
}
xhci_link_segments(xhci, prev, next, type);
prev = next;
num_segs--;
}
xhci_link_segments(xhci, prev, *first, type);
*last = prev;
return 0;
}
/**
@@ -144,44 +227,34 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
ring = kzalloc(sizeof *(ring), flags);
if (!ring)
return NULL;
ring->num_segs = num_segs;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
&ring->last_seg, num_segs, cycle_state, type, flags);
if (ret)
goto fail;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
/* See section 4.9.2.1 and 6.4.4.1 */
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
xhci_initialize_ring_info(ring, cycle_state);
return ring;
fail:
@@ -217,23 +290,64 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
struct xhci_ring *ring, unsigned int cycle_state,
enum xhci_ring_type type)
{
struct xhci_segment *seg = ring->first_seg;
int i;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= TRB_CYCLE;
}
/* All endpoint rings have link TRBs */
xhci_link_segments(xhci, seg, seg->next, type);
seg = seg->next;
} while (seg != ring->first_seg);
ring->type = type;
xhci_initialize_ring_info(ring, cycle_state);
/* td list should be empty since all URBs have been cancelled,
* but just in case...
*/
INIT_LIST_HEAD(&ring->td_list);
}
/*
* Expand an existing ring.
* Look for a cached ring or allocate a new ring with the same number of
* segments, and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
unsigned int num_segs;
unsigned int num_segs_needed;
int ret;
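/* Each segment supplies TRBS_PER_SEGMENT - 1 usable TRBs (the last
* entry is the link TRB), so round the request up by that divisor: */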
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
/* Allocate the number of segments we need, or double the ring size */
num_segs = ring->num_segs > num_segs_needed ?
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
num_segs, ring->cycle_state, ring->type, flags);
if (ret)
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
ring->num_segs);
return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -528,7 +642,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -862,7 +976,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1300,24 +1414,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
enum xhci_ring_type type;
u32 max_esit_payload;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1433,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2235,7 +2341,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segment for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2266,7 +2372,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* the event ring segment table (ERST). Section 4.9.3.
*/
xhci_dbg(xhci, "// Allocating event ring\n");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
goto fail;
/*
* xhci-plat.c - xHCI host controller driver platform Bus Glue.
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A lot of code borrowed from the Linux xHCI driver.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "xhci.h"
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_BROKEN_MSI;
}
/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
return xhci_gen_setup(hcd, xhci_plat_quirks);
}
static const struct hc_driver xhci_plat_xhci_driver = {
.description = "xhci-hcd",
.product_desc = "xHCI Host Controller",
.hcd_priv_size = sizeof(struct xhci_hcd *),
/*
* generic hardware linkage
*/
.irq = xhci_irq,
.flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
*/
.reset = xhci_plat_setup,
.start = xhci_run,
.stop = xhci_stop,
.shutdown = xhci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = xhci_urb_enqueue,
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
.alloc_streams = xhci_alloc_streams,
.free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.update_hub_device = xhci_update_hub_device,
.reset_device = xhci_discover_or_reset_device,
/*
* scheduling support
*/
.get_frame_number = xhci_get_frame,
/* Root hub support */
.hub_control = xhci_hub_control,
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
};
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct hc_driver *driver;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
int ret;
int irq;
if (usb_disabled())
return -ENODEV;
driver = &xhci_plat_xhci_driver;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
ret = -EBUSY;
goto put_hcd;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
dev_dbg(&pdev->dev, "error mapping memory\n");
ret = -EFAULT;
goto release_mem_region;
}
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto unmap_registers;
/* USB 2.0 roothub is stored in the platform_device now. */
hcd = dev_get_drvdata(&pdev->dev);
xhci = hcd_to_xhci(hcd);
xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto dealloc_usb2_hcd;
}
/*
* Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
* is called by usb_add_hcd().
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
return 0;
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
unmap_registers:
iounmap(hcd->regs);
release_mem_region:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
put_hcd:
usb_put_hcd(hcd);
return ret;
}
static int xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
usb_remove_hcd(hcd);
iounmap(hcd->regs);
usb_put_hcd(hcd);
kfree(xhci);
return 0;
}
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.driver = {
.name = "xhci-hcd",
},
};
MODULE_ALIAS("platform:xhci-hcd");
int xhci_register_plat(void)
{
return platform_driver_register(&usb_xhci_driver);
}
void xhci_unregister_plat(void)
{
platform_driver_unregister(&usb_xhci_driver);
}
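
Nothing in this file creates the platform device itself; board or bus-glue
code must register a device whose name matches .driver.name above. A minimal
sketch of such a registration (the MMIO base, window size, and IRQ number
below are invented for illustration):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource xhci_resources[] = {
	[0] = {
		.start	= 0x4a030000,	/* invented MMIO base */
		.end	= 0x4a030000 + 0x10000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 92,		/* invented IRQ line */
		.end	= 92,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device xhci_pdev = {
	.name		= "xhci-hcd",	/* must match usb_xhci_driver.driver.name */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(xhci_resources),
	.resource	= xhci_resources,
};

/* Registering the device, e.g. platform_device_register(&xhci_pdev) from
 * board init code, is what triggers xhci_plat_probe() above. */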
@@ -143,17 +143,25 @@ static void next_trb(struct xhci_hcd *xhci,
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
union xhci_trb *next;
unsigned long long addr;
ring->deq_updates++;
/* If this is not an event ring, there is one more usable TRB */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
ring->num_trbs_free++;
next = ++(ring->dequeue);
/* Update the dequeue pointer further if that was a link TRB or we're at
* the end of an event ring segment (which doesn't have link TRBS)
*/
while (last_trb(xhci, ring, ring->deq_seg, next)) {
if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
}
ring->deq_seg = ring->deq_seg->next;
@@ -181,13 +189,17 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
union xhci_trb *next;
unsigned long long addr;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not an event ring, there is one less usable TRB */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
@@ -195,35 +207,35 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
* the end of an event ring segment (which doesn't have link TRBS)
*/
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (ring->type != TYPE_EVENT) {
/*
* If the caller doesn't plan on enqueueing more
* TDs before ringing the doorbell, then we
* don't want to give the link TRB to the
* hardware just yet. We'll give the link TRB
* back in prepare_ring() just before we enqueue
* the TD at the top of the ring.
*/
if (!chain && !more_trbs_coming)
break;
/* If we're not dealing with 0.95 hardware or
* isoc rings on AMD 0.96 host,
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
if (!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST))
&& !xhci_link_trb_quirk(xhci)) {
next->link.control &=
cpu_to_le32(~TRB_CHAIN);
next->link.control |=
cpu_to_le32(chain);
}
/* Give this link TRB to the hardware */
wmb();
next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
@@ -237,55 +249,23 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment. See rules above.
*/
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
int num_trbs_in_deq_seg;
if (ring->num_trbs_free < num_trbs)
return 0;
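/* Transfer and stream rings can be expanded, but new segments are
* linked in behind the enqueue pointer, so never let the enqueue
* pointer advance into the segment the dequeue pointer occupies. */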
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
}
return 1;
}
@@ -892,6 +872,43 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
unsigned int ep_index)
{
union xhci_trb *dequeue_temp;
int num_trbs_free_temp;
bool revert = false;
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
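/* Walk the software dequeue pointer forward until it matches the
* pointer we handed to the hardware, bumping num_trbs_free for each
* TRB stepped over so the free count stays in sync. */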
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
ep_ring->dequeue)) {
if (ep_ring->dequeue ==
dev->eps[ep_index].queued_deq_ptr)
break;
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
if (ep_ring->dequeue == dequeue_temp) {
revert = true;
break;
}
}
if (revert) {
xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
ep_ring->num_trbs_free = num_trbs_free_temp;
}
}
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -973,8 +990,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq "
"Ptr command & xHCI internal state.\n");
@@ -1185,7 +1202,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci->error_bitmask |= 1 << 6;
break;
}
inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1398,7 +1415,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
@@ -1593,8 +1610,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring);
inc_deq(xhci, ep_ring);
}
td_cleanup:
@@ -1842,8 +1859,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring);
inc_deq(xhci, ep_ring);
return finish_td(xhci, td, NULL, event, ep, status, true);
}
@@ -2230,7 +2247,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Will roll back to continue process missed tds.
*/
if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
inc_deq(xhci, xhci->event_ring);
}
if (ret) {
@@ -2345,7 +2362,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
if (update_ptrs)
/* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
@@ -2461,7 +2478,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
@@ -2471,7 +2488,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
inc_enq(xhci, ring, more_trbs_coming);
}
/*
@@ -2479,8 +2496,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
* FIXME allocate segments if the ring is full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int num_trbs_needed;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
case EP_STATE_DISABLED:
@@ -2508,11 +2527,25 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
*/
return -EINVAL;
}
while (1) {
if (room_on_ring(xhci, ep_ring, num_trbs))
break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
return -ENOMEM;
}
xhci_dbg(xhci, "ERROR no room on ep ring, "
"try ring expansion\n");
num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
mem_flags)) {
xhci_err(xhci, "Ring expansion failed\n");
return -ENOMEM;
}
}
if (enqueue_is_link_trb(ep_ring)) {
struct xhci_ring *ring = ep_ring;
@@ -2524,8 +2557,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
if (!xhci_link_trb_quirk(xhci) &&
!(ring->type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2553,7 +2587,6 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
gfp_t mem_flags)
{
int ret;
@@ -2571,7 +2604,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
ret = prepare_ring(xhci, ep_ring,
le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
return ret;
@@ -2781,7 +2814,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
@@ -2869,7 +2902,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -2951,7 +2984,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
@@ -3023,7 +3056,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3080,7 +3113,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
@@ -3113,7 +3146,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
queue_trb(xhci, ep_ring, true,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3133,7 +3166,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
queue_trb(xhci, ep_ring, true,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
@@ -3149,7 +3182,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
else
field = TRB_DIR_IN;
queue_trb(xhci, ep_ring, false,
0,
0,
TRB_INTR_TARGET(0),
@@ -3289,8 +3322,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
@@ -3360,7 +3392,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3407,6 +3439,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
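/* Clean-up path: roll the enqueue pointer, cycle state, and free-TRB
* count back to their pre-URB values; none of these TDs were ever
* handed to the hardware. */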
ep_ring->enqueue = urb_priv->td[0]->first_trb;
ep_ring->enq_seg = urb_priv->td[0]->start_seg;
ep_ring->cycle_state = start_cycle;
ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
return ret;
}
@@ -3443,7 +3476,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Do not insert any td of the urb to the ring if the check failed.
*/
ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
return ret;
@@ -3479,6 +3512,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
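/* Snapshot the free-TRB count so the clean-up path in
* xhci_queue_isoc_tx() can restore it if queueing a TD fails. */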
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
@@ -3502,7 +3537,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
reserved_trbs, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
@@ -3510,8 +3545,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
field4 | xhci->cmd_ring->cycle_state);
return 0;
}
@@ -729,6 +729,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring->enq_seg = ring->deq_seg;
ring->enqueue = ring->dequeue;
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
/*
* Ring is now zeroed, so the HW should look for change of ownership
* when the cycle bit is set to 1.
@@ -3614,26 +3615,38 @@ static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
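/* e.g. a host U2 exit latency (u2del) of 300 microseconds maps to
* index 3 of this table on the BESL path below */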
/* Calculate HIRD/BESL for USB2 PORTPMSC*/
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
struct usb_device *udev)
{
int u2del, besl, besl_host;
int besl_device = 0;
u32 field;
u2del = HCS_U2_LATENCY(xhci->hcs_params3);
field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
if (field & USB_BESL_SUPPORT) {
for (besl_host = 0; besl_host < 16; besl_host++) {
if (xhci_besl_encoding[besl_host] >= u2del)
break;
}
/* Use baseline BESL value as default */
if (field & USB_BESL_BASELINE_VALID)
besl_device = USB_GET_BESL_BASELINE(field);
else if (field & USB_BESL_DEEP_VALID)
besl_device = USB_GET_BESL_DEEP(field);
} else {
if (u2del <= 50)
besl_host = 0;
else
besl_host = (u2del - 51) / 75 + 1;
}
besl = besl_host + besl_device;
if (besl > 15)
besl = 15;
return besl;
}
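/* Worked example for the function above, with made-up inputs: a host
* u2del of 300us gives besl_host = 3 (the first encoding >= 300) and a
* device baseline BESL of 2 gives besl = 5; on the HIRD fallback path,
* u2del = 400us gives (400 - 51) / 75 + 1 = 5. */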
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
@@ -3646,7 +3659,7 @@ static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
u32 temp, dev_id;
unsigned int port_num;
unsigned long flags;
int hird;
int ret;
if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
@@ -3692,12 +3705,7 @@ static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
* HIRD or BESL should be used. See USB2.0 LPM errata.
*/
pm_addr = port_array[port_num] + 1;
hird = xhci_calculate_hird_besl(xhci, udev);
temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
xhci_writel(xhci, temp, pm_addr);
@@ -3776,7 +3784,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
u32 temp;
unsigned int port_num;
unsigned long flags;
int hird;
if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
@@ -3799,11 +3807,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num);
hird = xhci_calculate_hird_besl(xhci, udev);
if (enable) {
temp &= ~PORT_HIRD_MASK;
@@ -3964,7 +3968,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
int retval;
u32 temp;
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
if (usb_hcd_is_primary_hcd(hcd)) {
xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
@@ -4059,6 +4064,11 @@ static int __init xhci_hcd_init(void)
printk(KERN_DEBUG "Problem registering PCI driver.");
return retval;
}
retval = xhci_register_plat();
if (retval < 0) {
printk(KERN_DEBUG "Problem registering platform driver.");
goto unreg_pci;
}
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
@@ -4078,11 +4088,15 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
return 0;
unreg_pci:
xhci_unregister_pci();
return retval;
}
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
xhci_unregister_pci();
xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);
@@ -1223,10 +1223,7 @@ union xhci_trb {
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
#define SEGMENT_SHIFT (__ffs(SEGMENT_SIZE))
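/* __ffs() of a power of two is its log2, so SEGMENT_SHIFT now tracks
* TRBS_PER_SEGMENT automatically instead of being hand-maintained */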
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
@@ -1253,8 +1250,19 @@ struct xhci_dequeue_state {
int new_cycle_state;
};
enum xhci_ring_type {
TYPE_CTRL = 0,
TYPE_ISOC,
TYPE_BULK,
TYPE_INTR,
TYPE_STREAM,
TYPE_COMMAND,
TYPE_EVENT,
};
struct xhci_ring {
struct xhci_segment *first_seg;
struct xhci_segment *last_seg;
union xhci_trb *enqueue;
struct xhci_segment *enq_seg;
unsigned int enq_updates;
@@ -1269,6 +1277,10 @@
*/
u32 cycle_state;
unsigned int stream_id;
unsigned int num_segs;
unsigned int num_trbs_free;
unsigned int num_trbs_free_temp;
enum xhci_ring_type type;
bool last_td_was_short;
};
@@ -1610,6 +1622,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
@@ -1649,6 +1663,17 @@ static inline int xhci_register_pci(void) { return 0; }
static inline void xhci_unregister_pci(void) {}
#endif
#if defined(CONFIG_USB_XHCI_PLATFORM) \
|| defined(CONFIG_USB_XHCI_PLATFORM_MODULE)
int xhci_register_plat(void);
void xhci_unregister_plat(void);
#else
static inline int xhci_register_plat(void)
{ return 0; }
static inline void xhci_unregister_plat(void)
{ }
#endif
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
void xhci_quiesce(struct xhci_hcd *xhci);
@@ -789,6 +789,11 @@ struct usb_ext_cap_descriptor { /* Link Power Management */
__u8 bDevCapabilityType;
__le32 bmAttributes;
#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */
#define USB_BESL_SUPPORT (1 << 2) /* supports BESL */
#define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/
#define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */
#define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8)
#define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12)
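/* e.g. bmAttributes == 0x0000030e sets the LPM, BESL-support, and
* baseline-valid bits, and USB_GET_BESL_BASELINE() extracts 3 */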
} __attribute__((packed));
#define USB_DT_USB_EXT_CAP_SIZE 7