Commit d195fcff authored by Xenia Ragiadakou, committed by Sarah Sharp

xhci: trace debug messages related to driver initialization and unload

This patch defines a new trace event, which is called xhci_dbg_init
and belongs to the event class xhci_log_msg, and adds tracepoints that
trace the debug statements in the functions used to start and stop the
xhci-hcd driver.

Also, it removes an unnecessary cast of variable val to unsigned int
in xhci_mem_init(), since val is already declared as unsigned int.
Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Parent commit: aa50b290
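For reference, xhci_dbg_trace() is the helper that each converted call site below relies on: it prints the message through the normal xhci_dbg() path and also fires the tracepoint passed in as a callback (here trace_xhci_dbg_init), packaging the format string and arguments in a struct va_format. A minimal sketch of such a helper, assuming the prototype implied by the calls in this diff (the actual definition lives outside this patch):

void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	/*
	 * Wrap the caller's format string and arguments so they can be
	 * expanded both by the xhci_dbg() printout (via the %pV format
	 * specifier) and by the tracepoint callback.
	 */
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}

With the xhci_dbg_init event defined further down, these messages can then be collected at runtime by enabling that event under the kernel's tracing filesystem, in addition to the usual dynamic debug output.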
@@ -1545,7 +1545,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

-	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocating %d scratchpad buffers", num_sp);

 	if (!num_sp)
 		return 0;
@@ -1702,11 +1703,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		dma_free_coherent(&pdev->dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg(xhci, "Freed ERST\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg(xhci, "Freed event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -1714,7 +1715,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
-	xhci_dbg(xhci, "Freed command ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
 	list_for_each_entry_safe(cur_cd, next_cd,
 			&xhci->cancel_cmd_list, cancel_cmd_list) {
 		list_del(&cur_cd->cancel_cmd_list);
@@ -1727,22 +1728,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->segment_pool)
 		dma_pool_destroy(xhci->segment_pool);
 	xhci->segment_pool = NULL;
-	xhci_dbg(xhci, "Freed segment pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

 	if (xhci->device_pool)
 		dma_pool_destroy(xhci->device_pool);
 	xhci->device_pool = NULL;
-	xhci_dbg(xhci, "Freed device context pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

 	if (xhci->small_streams_pool)
 		dma_pool_destroy(xhci->small_streams_pool);
 	xhci->small_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed small stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed small stream array pool");

 	if (xhci->medium_streams_pool)
 		dma_pool_destroy(xhci->medium_streams_pool);
 	xhci->medium_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed medium stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed medium stream array pool");

 	if (xhci->dcbaa)
 		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -1968,8 +1971,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	 * there might be more events to service.
 	 */
 	temp &= ~ERST_EHB;
-	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
-			"preserving EHB bit\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write event ring dequeue pointer, "
+			"preserving EHB bit");
 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
 			&xhci->ir_set->erst_dequeue);
 }
@@ -1992,8 +1996,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	temp = xhci_readl(xhci, addr + 2);
 	port_offset = XHCI_EXT_PORT_OFF(temp);
 	port_count = XHCI_EXT_PORT_COUNT(temp);
-	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
-			"count = %u, revision = 0x%x\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Ext Cap %p, port offset = %u, "
+			"count = %u, revision = 0x%x",
 			addr, port_offset, port_count, major_revision);
 	/* Port count includes the current port offset */
 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2007,15 +2012,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	/* Check the host's USB2 LPM capability */
 	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
 			(temp & XHCI_L1C)) {
-		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 0.96: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 	}

 	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
-		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 1.0: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 		if (temp & XHCI_HLC) {
-			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"xHCI 1.0: support USB2 hardware lpm");
 			xhci->hw_lpm_support = 1;
 		}
 	}
@@ -2139,18 +2147,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 		xhci_warn(xhci, "No ports on the roothubs?\n");
 		return -ENODEV;
 	}
-	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
 			xhci->num_usb2_ports, xhci->num_usb3_ports);

 	/* Place limits on the number of roothub ports so that the hub
 	 * descriptors aren't longer than the USB core will allocate.
 	 */
 	if (xhci->num_usb3_ports > 15) {
-		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 3.0 roothub ports to 15.");
 		xhci->num_usb3_ports = 15;
 	}
 	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
-		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 2.0 roothub ports to %u.",
 				USB_MAXCHILDREN);
 		xhci->num_usb2_ports = USB_MAXCHILDREN;
 	}
@@ -2175,8 +2186,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 			xhci->usb2_ports[port_index] =
 				&xhci->op_regs->port_status_base +
 				NUM_PORT_REGS*i;
-			xhci_dbg(xhci, "USB 2.0 port at index %u, "
-					"addr = %p\n", i,
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"USB 2.0 port at index %u, "
+					"addr = %p", i,
 					xhci->usb2_ports[port_index]);
 			port_index++;
 			if (port_index == xhci->num_usb2_ports)
@@ -2195,8 +2207,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 				xhci->usb3_ports[port_index] =
 					&xhci->op_regs->port_status_base +
 					NUM_PORT_REGS*i;
-				xhci_dbg(xhci, "USB 3.0 port at index %u, "
-						"addr = %p\n", i,
+				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+						"USB 3.0 port at index %u, "
+						"addr = %p", i,
 						xhci->usb3_ports[port_index]);
 				port_index++;
 				if (port_index == xhci->num_usb3_ports)
@@ -2220,32 +2233,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	INIT_LIST_HEAD(&xhci->cancel_cmd_list);

 	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
-	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size register = 0x%x", page_size);
 	for (i = 0; i < 16; i++) {
 		if ((0x1 & page_size) != 0)
 			break;
 		page_size = page_size >> 1;
 	}
 	if (i < 16)
-		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size of %iK", (1 << (i+12)) / 1024);
 	else
 		xhci_warn(xhci, "WARN: no supported page size\n");
 	/* Use 4K pages, since that's common and the minimum the HC supports */
 	xhci->page_shift = 12;
 	xhci->page_size = 1 << xhci->page_shift;
-	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"HCD page size set to %iK", xhci->page_size / 1024);

 	/*
 	 * Program the Number of Device Slots Enabled field in the CONFIG
 	 * register with the max value of slots the HC can handle.
 	 */
 	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
-	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// xHC can handle at most %d device slots.", val);
 	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
 	val |= (val2 & ~HCS_SLOTS_MASK);
-	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting Max device slots reg = 0x%x.", val);
 	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

 	/*
@@ -2258,7 +2274,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Device context base array address = 0x%llx (DMA), %p (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
@@ -2297,8 +2314,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
-	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
-	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocated command ring at %p", xhci->cmd_ring);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);

 	/* Set the address in the Command Ring Control register */
@@ -2306,7 +2324,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%x", val);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
@@ -2322,8 +2341,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
 	val &= DBOFF_MASK;
-	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
-			" from cap regs base addr\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr", val);
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
@@ -2334,7 +2354,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
 						flags);
 	if (!xhci->event_ring)
@@ -2347,13 +2367,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			GFP_KERNEL);
 	if (!xhci->erst.entries)
 		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Allocated event ring segment table at 0x%llx",
 			(unsigned long long)dma);

 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
 	xhci->erst.num_entries = ERST_NUM_SEGS;
 	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
 			xhci->erst.num_entries,
 			xhci->erst.entries,
 			(unsigned long long)xhci->erst.erst_dma_addr);
@@ -2371,13 +2393,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
 	val &= ERST_SIZE_MASK;
 	val |= ERST_NUM_SEGS;
-	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
 			val);
 	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

-	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST entries to point to event ring.");
 	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST base address for ir_set 0 = 0x%llx",
 			(unsigned long long)xhci->erst.erst_dma_addr);
 	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	val_64 &= ERST_PTR_MASK;
@@ -2386,7 +2411,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
-	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);

 	/*
...
@@ -57,6 +57,11 @@ DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
 	TP_ARGS(vaf)
 );

+DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_ctx,
 	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
 		unsigned int ep_num),
...
@@ -101,7 +101,7 @@ void xhci_quiesce(struct xhci_hcd *xhci)
 int xhci_halt(struct xhci_hcd *xhci)
 {
 	int ret;
-	xhci_dbg(xhci, "// Halt the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
 	xhci_quiesce(xhci);

 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
@@ -125,7 +125,7 @@ static int xhci_start(struct xhci_hcd *xhci)
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
-	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
 			temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
@@ -163,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 		return 0;
 	}

-	xhci_dbg(xhci, "// Reset the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RESET;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
@@ -173,7 +173,8 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (ret)
 		return ret;

-	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wait for controller to be ready for doorbell rings");
 	/*
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
@@ -215,14 +216,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 	ret = pci_enable_msi(pdev);
 	if (ret) {
-		xhci_dbg(xhci, "failed to allocate MSI entry\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"failed to allocate MSI entry");
 		return ret;
 	}

 	ret = request_irq(pdev->irq, xhci_msi_irq,
 				0, "xhci_hcd", xhci_to_hcd(xhci));
 	if (ret) {
-		xhci_dbg(xhci, "disable MSI interrupt\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"disable MSI interrupt");
 		pci_disable_msi(pdev);
 	}
@@ -285,7 +288,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 	if (ret) {
-		xhci_dbg(xhci, "Failed to enable MSI-X\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Failed to enable MSI-X");
 		goto free_entries;
 	}
@@ -301,7 +305,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 	return ret;

 disable_msix:
-	xhci_dbg(xhci, "disable MSI-X interrupt\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
 	xhci_free_irq(xhci);
 	pci_disable_msix(pdev);
 free_entries:
@@ -509,17 +513,18 @@ int xhci_init(struct usb_hcd *hcd)
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int retval = 0;

-	xhci_dbg(xhci, "xhci_init\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
 	spin_lock_init(&xhci->lock);
 	if (xhci->hci_version == 0x95 && link_quirk) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"QUIRK: Not clearing Link TRB chain bits.");
 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
 	} else {
-		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI doesn't need link TRB QUIRK");
 	}
 	retval = xhci_mem_init(xhci, GFP_KERNEL);
-	xhci_dbg(xhci, "Finished xhci_init\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

 	/* Initializing Compliance Mode Recovery Data If Needed */
 	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -545,7 +550,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_NEC_HOST)
 		xhci_ring_cmd_db(xhci);

-	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB3 roothub");
 	return 0;
 }
@@ -576,7 +582,7 @@ int xhci_run(struct usb_hcd *hcd)
 	if (!usb_hcd_is_primary_hcd(hcd))
 		return xhci_run_finished(xhci);

-	xhci_dbg(xhci, "xhci_run\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

 	ret = xhci_try_enable_msi(hcd);
 	if (ret)
@@ -594,9 +600,11 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
 	temp_64 &= ~ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

-	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set the interrupt modulation register");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
 	temp |= (u32) 160;
@@ -605,12 +613,13 @@ int xhci_run(struct usb_hcd *hcd)
 	/* Set the HCD state before we enable the irqs */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_EIE);
-	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
-			temp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enable interrupts, cmd = 0x%x.", temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);

 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
@@ -620,7 +629,8 @@ int xhci_run(struct usb_hcd *hcd)
 		xhci_queue_vendor_command(xhci, 0, 0, 0,
 				TRB_TYPE(TRB_NEC_GET_FW));

-	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Finished xhci_run for USB2 roothub");
 	return 0;
 }
@@ -680,7 +690,8 @@ void xhci_stop(struct usb_hcd *hcd)
 	if (xhci->quirks & XHCI_AMD_PLL_FIX)
 		usb_amd_dev_put();

-	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Disabling event ring interrupts");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
@@ -688,9 +699,10 @@ void xhci_stop(struct usb_hcd *hcd)
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, 0);

-	xhci_dbg(xhci, "cleaning up memory\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
 	xhci_mem_cleanup(xhci);
-	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_stop completed - status = %x",
 			xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -716,7 +728,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	xhci_cleanup_msix(xhci);

-	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"xhci_shutdown completed - status = %x",
 			xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -758,7 +771,8 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
 					xhci->cmd_ring->dequeue) &
 		(u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%llx",
 			(long unsigned long) val_64);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 }
...