Commit 39a7d726 authored by David S. Miller

Merge branch 'thunderbolt-end-to-end-flow-control'

Mika Westerberg says:

====================
thunderbolt: net: Enable full end-to-end flow control

Thunderbolt/USB4 host controllers support full end-to-end flow control
that prevents dropping packets if there are not enough hardware receive
buffers. So far it has not been enabled for the networking driver,
but this series changes that. There is one snag though: the second
generation (Intel Falcon Ridge) has a bug that needs a special quirk
to get it working. We had that quirk in the early stages of the
Thunderbolt/USB4 driver but it got dropped because it was not needed
at the time. Now we add it back as a quirk for the host controller (NHI).

The first patch of this series is a bugfix that I'm planning to push for
v6.0-rc. The rest are v6.1 material. The series also includes a patch that
shows the XDomain link type in sysfs the same way we do for USB4 routers
and updates the networking driver module description.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
// SPDX-License-Identifier: GPL-2.0
/*
- * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
+ * Networking over Thunderbolt/USB4 cables using USB4NET protocol
+ * (formerly Apple ThunderboltIP).
*
* Copyright (C) 2017, Intel Corporation
* Authors: Amir Levy <amir.jer.levy@intel.com>
@@ -30,6 +31,7 @@
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
+ #define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
@@ -209,6 +211,10 @@ static const uuid_t tbnet_svc_uuid =
static struct tb_property_dir *tbnet_dir;
+ static bool tbnet_e2e = true;
+ module_param_named(e2e, tbnet_e2e, bool, 0444);
+ MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
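Note that the parameter is registered with mode 0444, so full end-to-end flow control can only be disabled at module load time, for example with "modprobe thunderbolt_net e2e=0" (assuming the networking driver's usual module name); the active value can be read back from /sys/module/<module>/parameters/e2e.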
@@ -612,18 +618,13 @@ static void tbnet_connected_work(struct work_struct *work)
return;
}
- /* Both logins successful so enable the high-speed DMA paths and
- * start the network device queue.
+ /* Both logins successful so enable the rings, high-speed DMA
+ * paths and start the network device queue.
+ *
+ * Note we enable the DMA paths last to make sure we have primed
+ * the Rx ring before any incoming packets are allowed to
+ * arrive.
*/
- ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
- net->rx_ring.ring->hop,
- net->remote_transmit_path,
- net->tx_ring.ring->hop);
- if (ret) {
- netdev_err(net->dev, "failed to enable DMA paths\n");
- return;
- }
tb_ring_start(net->tx_ring.ring);
tb_ring_start(net->rx_ring.ring);
@@ -635,10 +636,21 @@ static void tbnet_connected_work(struct work_struct *work)
if (ret)
goto err_free_rx_buffers;
+ ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
+ net->rx_ring.ring->hop,
+ net->remote_transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret) {
+ netdev_err(net->dev, "failed to enable DMA paths\n");
+ goto err_free_tx_buffers;
+ }
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
return;
+ err_free_tx_buffers:
+ tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
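The error unwind mirrors the new setup order: if enabling the DMA paths fails after the rings have been started and the buffers allocated, the new err_free_tx_buffers label releases the Tx buffers and then falls through to the existing Rx buffer cleanup and ring stop labels, so every resource is released exactly once.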
@@ -867,6 +879,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ unsigned int flags;
int hopid;
netif_carrier_off(dev);
@@ -891,9 +904,14 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME, 0, sof_mask, eof_mask,
- tbnet_start_poll, net);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ net->tx_ring.ring->hop, sof_mask,
+ eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_ring_free(net->tx_ring.ring);
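The fifth argument of tb_ring_alloc_rx() names the transmit HopID that the Rx ring's flow-control credits are transferred to, which is why it changes from 0 to the local Tx ring's hop once E2E is in use. The driver simply fails tbnet_open() when the allocation fails; a consumer that wanted to degrade gracefully could instead retry without E2E, roughly like the following sketch (illustration only, not part of this series):

        /* Hypothetical fallback: try full end-to-end flow control first,
         * then fall back to plain frame mode if the Rx ring cannot be
         * allocated with it.
         */
        ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
                                RING_FLAG_FRAME | RING_FLAG_E2E,
                                net->tx_ring.ring->hop, sof_mask, eof_mask,
                                tbnet_start_poll, net);
        if (!ring)
                ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
                                        RING_FLAG_FRAME, 0, sof_mask,
                                        eof_mask, tbnet_start_poll, net);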
@@ -1356,6 +1374,7 @@ static struct tb_service_driver tbnet_driver = {
static int __init tbnet_init(void)
{
+ unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1365,12 +1384,11 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
- /* Currently only announce support for match frags ID (bit 1). Bit 0
- * is reserved for full E2E flow control which we do not support at
- * the moment.
- */
- tb_property_add_immediate(tbnet_dir, "prtcstns",
- TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+ flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+ if (tbnet_e2e)
+ flags |= TBNET_E2E;
+ tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {
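Because prtcstns is published in the XDomain property directory that the remote host reads during discovery, disabling the module parameter also clears the TBNET_E2E bit from the advertisement: a peer running the same driver then sees bit 0 unset and refrains from requesting RING_FLAG_E2E toward us, keeping both ends consistent.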
@@ -1393,5 +1411,5 @@ module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
@@ -28,7 +28,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
+ /*
+ * Used with QUIRK_E2E to specify an unused HopID the Rx credits are
+ * transferred.
+ */
+ #define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
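Taken together with the nhi_alloc_hop() changes below, the workaround is: on quirked hardware HopID 1 is never handed out to a normal ring (allocation starts from HopID 2 instead), and any Rx ring that requests E2E gets its credit target redirected to this permanently unused HopID, which is what avoids the Rx packet loss on Falcon Ridge.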
@@ -38,7 +42,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+ /* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
+ #define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -458,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+ unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
+ if (nhi->quirks & QUIRK_E2E) {
+ start_hop = RING_FIRST_USABLE_HOPID + 1;
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+ ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+ ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+ }
+ }
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -469,7 +485,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
* Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1.
*/
- for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
@@ -484,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
}
}
+ if (ring->hop > 0 && ring->hop < start_hop) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
@@ -1097,12 +1118,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi)
{
- /*
- * Intel hardware supports auto clear of the interrupt status
- * register right after interrupt is being issued.
- */
- if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ /*
+ * Intel hardware supports auto clear of the interrupt
+ * status register right after interrupt is being
+ * issued.
+ */
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ /*
+ * Falcon Ridge controller needs the end-to-end
+ * flow control workaround to avoid losing Rx
+ * packets when RING_FLAG_E2E is set.
+ */
+ nhi->quirks |= QUIRK_E2E;
+ break;
+ }
+ }
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
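nhi_check_quirks() keys the quirks off the PCI vendor and device IDs, so covering another controller with the same erratum would just mean adding a case to the switch. A minimal sketch of what that would look like (the third device ID is hypothetical, shown only for shape):

        switch (nhi->pdev->device) {
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
        case PCI_DEVICE_ID_INTEL_EXAMPLE_NHI: /* hypothetical ID */
                /* All of these steer Rx credits to the reserved HopID */
                nhi->quirks |= QUIRK_E2E;
                break;
        }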
@@ -174,10 +174,10 @@ static void tb_discover_tunnels(struct tb *tb)
}
}
- static int tb_port_configure_xdomain(struct tb_port *port)
+ static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
- return usb4_port_configure_xdomain(port);
+ return usb4_port_configure_xdomain(port, xd);
return tb_lc_configure_xdomain(port);
}
@@ -212,7 +212,7 @@ static void tb_scan_xdomain(struct tb_port *port)
NULL);
if (xd) {
tb_port_at(route, sw)->xdomain = xd;
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, xd);
tb_xdomain_add(xd);
}
}
@@ -1516,7 +1516,7 @@ static void tb_restore_children(struct tb_switch *sw)
tb_restore_children(port->remote->sw);
} else if (port->xdomain) {
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, port->xdomain);
}
}
}
@@ -1176,7 +1176,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
- int usb4_port_configure_xdomain(struct tb_port *port);
+ int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
@@ -1115,12 +1115,14 @@ static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
/**
* usb4_port_configure_xdomain() - Configure port for XDomain
* @port: USB4 port connected to another host
+ * @xd: XDomain that is connected to the port
*
- * Marks the USB4 port as being connected to another host. Returns %0 in
- * success and negative errno in failure.
+ * Marks the USB4 port as being connected to another host and updates
+ * the link type. Returns %0 in success and negative errno in failure.
*/
- int usb4_port_configure_xdomain(struct tb_port *port)
+ int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
+ xd->link_usb4 = link_is_usb4(port);
return usb4_set_xdomain_configured(port, true);
}
@@ -53,6 +53,8 @@ static ssize_t link_show(struct device *dev, struct device_attribute *attr,
link = port->sw->link_usb4 ? "usb4" : "tbt";
else if (tb_port_has_remote(port))
link = port->remote->sw->link_usb4 ? "usb4" : "tbt";
+ else if (port->xdomain)
+ link = port->xdomain->link_usb4 ? "usb4" : "tbt";
else
link = "none";
@@ -187,6 +187,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
+ * @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
* queried first
@@ -234,6 +235,7 @@
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
+ bool link_usb4;
bool is_unplugged;
bool needs_uuid;
struct ida service_ids;