Commit 0e4c9f13 authored by David S. Miller

Merge branch 'nfp-concurrency'

Jakub Kicinski says:

====================
nfp: allow concurrency in core and minor fixes

The first 10 patches of this series prepare nfpcore for concurrent
accesses.  This will be needed by upcoming hwmon and devlink patches.
Most locking is already in place; the patches in this series iron out
a few bugs.

The last 5 patches are fixes and cleanups to the netdev code, including
removal of doorbell pointers used only on old versions of the chip,
removal of unnecessarily defensive code and flushing xmit_more more
carefully on error paths.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
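The "flushing xmit_more" fix mentioned above follows a general pattern: when the stack sets skb->xmit_more, the driver defers the doorbell write and only accumulates a count of queued descriptors, so an early return on an error path must flush that count or frames queued earlier in the batch may never reach the hardware. A minimal sketch of such a flush helper, assuming a TX ring with a wr_ptr_add accumulator and an nfp_qcp_wr_ptr_add() doorbell accessor modeled on this driver:

/* Sketch of the deferred-doorbell flush; the field and helper names are
 * assumptions modeled on this driver rather than copied from it.
 */
static void tx_ring_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	if (!tx_ring->wr_ptr_add)
		return;

	/* Descriptor writes must be visible before the pointer update */
	wmb();

	/* Tell the queue controller about everything queued since the
	 * last doorbell, then reset the accumulator.
	 */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}

The two nfp_net_tx() hunks below call the driver's version of this helper on the ring-busy and DMA-mapping-failure paths.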
@@ -6,6 +6,7 @@ nfp-objs := \
nfpcore/nfp_cpplib.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
nfpcore/nfp_mutex.o \
nfpcore/nfp_nffw.o \
nfpcore/nfp_nsp.o \
nfpcore/nfp_nsp_eth.o \
......
@@ -307,9 +307,7 @@ struct nfp_net_rx_buf {
* @rd_p: FL/RX ring read pointer (free running)
* @idx: Ring index from Linux's perspective
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue
* @qcp_fl: Pointer to base of the QCP freelist queue
* @qcp_rx: Pointer to base of the QCP RX queue
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for free list batching)
* @rxbufs: Array of FL/RX buffers
@@ -324,13 +322,11 @@ struct nfp_net_rx_ring {
u32 wr_p;
u32 rd_p;
u16 idx;
u16 wr_ptr_add;
u32 idx;
u32 wr_ptr_add;
int fl_qcidx;
int rx_qcidx;
u8 __iomem *qcp_fl;
u8 __iomem *qcp_rx;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
......
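The fields dropped above (@rx_qcidx, @qcp_rx) were QCP doorbell pointers used only on older revisions of the chip; the host now only rings the freelist queue, and, per the @wr_ptr_add kdoc, those doorbell writes are batched. A sketch of the batching, with the batch size and helper name as assumptions:

#define FL_BATCH 8	/* assumed freelist batch size */

/* Give one buffer back to the freelist, ringing the QCP doorbell only
 * once per FL_BATCH buffers (sketch based on the kdoc above).
 */
static void rx_ring_fl_give_one(struct nfp_net_rx_ring *rx_ring)
{
	rx_ring->wr_p++;
	if (++rx_ring->wr_ptr_add < FL_BATCH)
		return;

	/* Freelist descriptor writes must land before the doorbell */
	wmb();
	nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
	rx_ring->wr_ptr_add = 0;
}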
@@ -336,9 +336,9 @@ nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
if (dp->num_rx_rings > dp->num_r_vecs ||
dp->num_tx_rings > dp->num_r_vecs)
nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
dp->num_rx_rings, dp->num_tx_rings,
dp->num_r_vecs);
dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
dp->num_rx_rings, dp->num_tx_rings,
dp->num_r_vecs);
dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
@@ -479,10 +479,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->r_vec = r_vec;
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
}
/**
@@ -762,6 +759,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
qidx, tx_ring->wr_p, tx_ring->rd_p);
netif_tx_stop_queue(nd_q);
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_busy++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -870,6 +868,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_errors++;
u64_stats_update_end(&r_vec->tx_sync);
@@ -2128,7 +2127,11 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}
static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
/**
* nfp_net_set_config_and_enable() - Write control BAR and enable NFP
* @nn: NFP Net device to reconfigure
*/
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
u32 new_ctrl, update = 0;
unsigned int r;
@@ -2177,6 +2180,10 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
if (err) {
nfp_net_clear_config_and_disable(nn);
return err;
}
nn->dp.ctrl = new_ctrl;
@@ -2192,22 +2199,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
udp_tunnel_get_rx_info(nn->dp.netdev);
}
return err;
}
/**
* nfp_net_set_config_and_enable() - Write control BAR and enable NFP
* @nn: NFP Net device to reconfigure
*/
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
int err;
err = __nfp_net_set_config_and_enable(nn);
if (err)
nfp_net_clear_config_and_disable(nn);
return err;
return 0;
}
/**
@@ -2234,11 +2226,6 @@ static int nfp_net_netdev_open(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
int err, r;
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE) {
nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->dp.ctrl);
return -EBUSY;
}
/* Step 1: Allocate resources for rings and the like
* - Request interrupts
* - Allocate RX and TX ring resources
@@ -2369,11 +2356,6 @@ static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
nn_err(nn, "Dev is not up: 0x%08x\n", nn->dp.ctrl);
return 0;
}
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
nfp_net_close_stack(nn);
@@ -2458,7 +2440,7 @@ static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
return err;
}
return __nfp_net_set_config_and_enable(nn);
return nfp_net_set_config_and_enable(nn);
}
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
......
@@ -40,9 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
int fl_rd_p, fl_wr_p, rxd_cnt;
struct nfp_net_rx_desc *rxd;
struct nfp_net *nn;
void *frag;
@@ -61,14 +61,11 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl);
fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl);
rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx);
rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
seq_printf(file, "RX[%02d,%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n",
rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n",
rx_ring->idx, rx_ring->fl_qcidx,
rx_ring->cnt, &rx_ring->dma, rx_ring->rxds,
rx_ring->rd_p, rx_ring->wr_p,
fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p);
rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p);
for (i = 0; i < rxd_cnt; i++) {
rxd = &rx_ring->rxds[i];
@@ -91,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
seq_puts(file, " FL_RD");
if (i == fl_wr_p % rxd_cnt)
seq_puts(file, " FL_WR");
if (i == rx_rd_p % rxd_cnt)
seq_puts(file, " RX_RD");
if (i == rx_wr_p % rxd_cnt)
seq_puts(file, " RX_WR");
seq_putc(file, '\n');
}
......
@@ -66,14 +66,7 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state,
/* Implemented in nfp_resource.c */
#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
/* NFP Resource Table self-identifier */
#define NFP_RESOURCE_TBL_NAME "nfp.res"
#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
/* All other keys are CRC32-POSIX of the 8-byte identification string */
/* All keys are CRC32-POSIX of the 8-byte identification string */
/* ARM/PCI vNIC Interfaces 0..3 */
#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0"
......
@@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
#define TARGET_WIDTH_64 8
static int
compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
u32 *bar_config, u64 *bar_base,
int tgt, int act, int tok, u64 offset, size_t size, int width)
{
@@ -410,35 +410,36 @@ find_matching_bar(struct nfp6000_pcie *nfp,
/* Return EAGAIN if no resource is available */
static int
find_unused_bar_noblock(struct nfp6000_pcie *nfp,
find_unused_bar_noblock(const struct nfp6000_pcie *nfp,
int tgt, int act, int tok,
u64 offset, size_t size, int width)
{
int n, invalid = 0;
int n, busy = 0;
for (n = 0; n < nfp->bars; n++) {
struct nfp_bar *bar = &nfp->bar[n];
const struct nfp_bar *bar = &nfp->bar[n];
int err;
if (bar->bitsize == 0) {
invalid++;
continue;
}
if (atomic_read(&bar->refcnt) != 0)
if (!bar->bitsize)
continue;
/* Just check to see if we can make it fit... */
err = compute_bar(nfp, bar, NULL, NULL,
tgt, act, tok, offset, size, width);
if (err)
continue;
if (err < 0)
invalid++;
else
if (!atomic_read(&bar->refcnt))
return n;
busy++;
}
return (n == invalid) ? -EINVAL : -EAGAIN;
if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n",
tgt, act, tok, offset, size, width))
return -EINVAL;
return -EAGAIN;
}
static int
......
@@ -65,39 +65,49 @@ struct nfp_cpp_resource {
u64 end;
};
struct nfp_cpp_mutex {
struct list_head list;
struct nfp_cpp *cpp;
int target;
u16 usage;
u16 depth;
unsigned long long address;
u32 key;
};
/**
* struct nfp_cpp - main nfpcore device structure
* Following fields are read-only after probe() exits or netdevs are spawned.
* @dev: embedded device structure
* @op: low-level implementation ops
* @priv: private data of the low-level implementation
* @model: chip model
* @interface: chip interface id we are using to reach it
* @serial: chip serial number
* @imb_cat_table: CPP Mapping Table
*
* Following fields can be used only in probe() or with rtnl held:
* @hwinfo: HWInfo database fetched from the device
* @rtsym: firmware run time symbols
*
* Following fields use explicit locking:
* @resource_list: NFP CPP resource list
* @resource_lock: protects @resource_list
*
* @area_cache_list: cached areas for cpp/xpb read/write speed up
* @area_cache_mutex: protects @area_cache_list
*
* @waitq: area wait queue
*/
struct nfp_cpp {
struct device dev;
void *priv; /* Private data of the low-level implementation */
void *priv;
u32 model;
u16 interface;
u8 serial[NFP_SERIAL_LEN];
const struct nfp_cpp_operations *op;
struct list_head resource_list; /* NFP CPP resource list */
struct list_head mutex_cache; /* Mutex cache */
struct list_head resource_list;
rwlock_t resource_lock;
wait_queue_head_t waitq;
/* NFP6000 CPP Mapping Table */
u32 imb_cat_table[16];
/* Cached areas for cpp/xpb readl/writel speedups */
struct mutex area_cache_mutex; /* Lock for the area cache */
struct mutex area_cache_mutex;
struct list_head area_cache_list;
/* Cached information */
void *hwinfo;
void *rtsym;
};
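The expanded kdoc above makes the locking rules explicit instead of leaving them implied by scattered comments; for example, any walk of @resource_list must hold @resource_lock. A minimal read-side sketch (the @list linkage and @start member of struct nfp_cpp_resource are assumptions for illustration):

static bool
nfp_cpp_resource_overlaps(struct nfp_cpp *cpp, u64 start, u64 end)
{
	struct nfp_cpp_resource *res;
	bool hit = false;

	/* @resource_list is protected by @resource_lock, per the kdoc */
	read_lock(&cpp->resource_lock);
	list_for_each_entry(res, &cpp->resource_list, list) {
		if (res->start <= end && start <= res->end) {
			hit = true;
			break;
		}
	}
	read_unlock(&cpp->resource_lock);

	return hit;
}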
@@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp)
{
struct nfp_cpp_area_cache *cache, *ctmp;
struct nfp_cpp_resource *res, *rtmp;
struct nfp_cpp_mutex *mutex, *mtmp;
/* There should be no mutexes in the cache at this point. */
WARN_ON(!list_empty(&cpp->mutex_cache));
/* .. but if there are, unlock them and complain. */
list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
mutex->target, (unsigned long long)mutex->address,
mutex->depth, mutex->usage);
/* Forcing an unlock */
mutex->depth = 1;
nfp_cpp_mutex_unlock(mutex);
/* Forcing a free */
mutex->usage = 1;
nfp_cpp_mutex_free(mutex);
}
/* Remove all caches */
list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
@@ -419,9 +411,43 @@
*/
void nfp_cpp_area_free(struct nfp_cpp_area *area)
{
if (atomic_read(&area->refcount))
nfp_warn(area->cpp, "Warning: freeing busy area\n");
nfp_cpp_area_put(area);
}
static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
{
*status = area->cpp->op->area_acquire(area);
return *status != -EAGAIN;
}
static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
int err, status;
if (atomic_inc_return(&area->refcount) > 1)
return 0;
if (!area->cpp->op->area_acquire)
return 0;
err = wait_event_interruptible(area->cpp->waitq,
nfp_cpp_area_acquire_try(area, &status));
if (!err)
err = status;
if (err) {
nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
atomic_dec(&area->refcount);
return err;
}
nfp_cpp_area_get(area);
return 0;
}
/**
* nfp_cpp_area_acquire() - lock down a CPP area for access
* @area: CPP area handle
@@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area)
*/
int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
mutex_lock(&area->mutex);
if (atomic_inc_return(&area->refcount) == 1) {
int (*a_a)(struct nfp_cpp_area *);
a_a = area->cpp->op->area_acquire;
if (a_a) {
int err;
int ret;
wait_event_interruptible(area->cpp->waitq,
(err = a_a(area)) != -EAGAIN);
if (err < 0) {
atomic_dec(&area->refcount);
mutex_unlock(&area->mutex);
return err;
}
}
}
mutex_lock(&area->mutex);
ret = __nfp_cpp_area_acquire(area);
mutex_unlock(&area->mutex);
nfp_cpp_area_get(area);
return 0;
return ret;
}
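The rework above serializes the whole acquire under @mutex and, unlike the old loop, tells an interrupted wait (-ERESTARTSYS from wait_event_interruptible()) apart from a failure reported by the transport's area_acquire() op. For context, a typical caller sequence over this API, as a sketch; nfp_cpp_area_release() is assumed to be the counterpart of nfp_cpp_area_acquire(), matching how the read/write wrappers below pair acquire with release:

static int cpp_read_once(struct nfp_cpp *cpp, u32 dest,
			 unsigned long long address, void *buf, size_t len)
{
	struct nfp_cpp_area *area;
	int err;

	area = nfp_cpp_area_alloc(cpp, dest, address, len);
	if (!area)
		return -ENOMEM;

	err = nfp_cpp_area_acquire(area);	/* may sleep on cpp->waitq */
	if (err) {
		nfp_cpp_area_free(area);
		return err;
	}

	err = nfp_cpp_area_read(area, 0, buf, len);

	nfp_cpp_area_release(area);
	nfp_cpp_area_free(area);
	return err;
}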
/**
@@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id,
* the need for special case code below when
* checking against available cache size.
*/
if (length == 0)
return NULL;
if (list_empty(&cpp->area_cache_list) || id == 0)
if (length == 0 || id == 0)
return NULL;
/* Remap from cpp_island to cpp_target */
@@ -840,10 +849,15 @@
if (err < 0)
return NULL;
addr += *offset;
mutex_lock(&cpp->area_cache_mutex);
if (list_empty(&cpp->area_cache_list)) {
mutex_unlock(&cpp->area_cache_mutex);
return NULL;
}
addr += *offset;
/* See if we have a match */
list_for_each_entry(cache, &cpp->area_cache_list, entry) {
if (id == cache->id &&
@@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
if (err)
goto out;
if (err) {
nfp_cpp_area_free(area);
return err;
}
}
err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
out:
if (cache)
area_cache_put(cpp, cache);
else
@@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
return -ENOMEM;
err = nfp_cpp_area_acquire(area);
if (err)
goto out;
if (err) {
nfp_cpp_area_free(area);
return err;
}
}
err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
out:
if (cache)
area_cache_put(cpp, cache);
else
@@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
INIT_LIST_HEAD(&cpp->mutex_cache);
INIT_LIST_HEAD(&cpp->resource_list);
INIT_LIST_HEAD(&cpp->area_cache_list);
mutex_init(&cpp->area_cache_mutex);
@@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
return &cpp_explicit[1];
}
/* THIS FUNCTION IS NOT EXPORTED */
static u32 nfp_mutex_locked(u16 interface)
{
return (u32)interface << 16 | 0x000f;
}
static u32 nfp_mutex_unlocked(u16 interface)
{
return (u32)interface << 16 | 0x0000;
}
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
}
static bool nfp_mutex_is_unlocked(u32 val)
{
return (val & 0xffff) == 0000;
}
/* If you need more than 65536 recursive locks, please rethink your code. */
#define MUTEX_DEPTH_MAX 0xffff
static int
nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
{
/* Not permitted on invalid interfaces */
if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
NFP_CPP_INTERFACE_TYPE_INVALID)
return -EINVAL;
/* Address must be 64-bit aligned */
if (address & 7)
return -EINVAL;
if (*target != NFP_CPP_TARGET_MU)
return -EINVAL;
return 0;
}
/**
* nfp_cpp_mutex_init() - Initialize a mutex location
* @cpp: NFP CPP handle
* @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
* @address: Offset into the address space of the NFP CPP target ID
* @key: Unique 32-bit value for this mutex
*
* The CPP target:address must point to a 64-bit aligned location, and
* will initialize 64 bits of data at the location.
*
* This creates the initial mutex state, as locked by this
* nfp_cpp_interface().
*
* This function should only be called when setting up
* the initial lock state upon boot-up of the system.
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
int target, unsigned long long address, u32 key)
{
const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
u16 interface = nfp_cpp_interface(cpp);
int err;
err = nfp_cpp_mutex_validate(interface, &target, address);
if (err)
return err;
err = nfp_cpp_writel(cpp, muw, address + 4, key);
if (err)
return err;
err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
if (err)
return err;
return 0;
}
/**
* nfp_cpp_mutex_alloc() - Create a mutex handle
* @cpp: NFP CPP handle
* @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
* @address: Offset into the address space of the NFP CPP target ID
* @key: 32-bit unique key (must match the key at this location)
*
* The CPP target:address must point to a 64-bit aligned location, and
* reserve 64 bits of data at the location for use by the handle.
*
* Only target/address pairs that point to entities that support the
* MU Atomic Engine's CmpAndSwap32 command are supported.
*
* Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
*/
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
unsigned long long address, u32 key)
{
const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
u16 interface = nfp_cpp_interface(cpp);
struct nfp_cpp_mutex *mutex;
int err;
u32 tmp;
err = nfp_cpp_mutex_validate(interface, &target, address);
if (err)
return NULL;
/* Look for mutex on cache list */
list_for_each_entry(mutex, &cpp->mutex_cache, list) {
if (mutex->target == target && mutex->address == address) {
mutex->usage++;
return mutex;
}
}
err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
if (err < 0)
return NULL;
if (tmp != key)
return NULL;
mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
if (!mutex)
return NULL;
mutex->cpp = cpp;
mutex->target = target;
mutex->address = address;
mutex->key = key;
mutex->depth = 0;
mutex->usage = 1;
/* Add mutex to cache list */
list_add(&mutex->list, &cpp->mutex_cache);
return mutex;
}
/**
* nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
* @mutex: NFP CPP Mutex handle
*/
void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
{
if (--mutex->usage)
return;
/* Remove mutex from cache */
list_del(&mutex->list);
kfree(mutex);
}
/**
* nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
* @mutex: NFP CPP Mutex handle
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
unsigned long warn_at = jiffies + 15 * HZ;
unsigned int timeout_ms = 1;
int err;
/* We can't use a waitqueue here, because the unlocker
* might be on a separate CPU.
*
* So just wait for now.
*/
for (;;) {
err = nfp_cpp_mutex_trylock(mutex);
if (err != -EBUSY)
break;
err = msleep_interruptible(timeout_ms);
if (err != 0)
return -ERESTARTSYS;
if (time_is_before_eq_jiffies(warn_at)) {
warn_at = jiffies + 60 * HZ;
dev_warn(mutex->cpp->dev.parent,
"Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
mutex->usage, mutex->depth,
mutex->target, mutex->address, mutex->key);
}
}
return err;
}
/**
* nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
* @mutex: NFP CPP Mutex handle
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
struct nfp_cpp *cpp = mutex->cpp;
u32 key, value;
u16 interface;
int err;
interface = nfp_cpp_interface(cpp);
if (mutex->depth > 1) {
mutex->depth--;
return 0;
}
err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
if (err < 0)
return err;
if (key != mutex->key)
return -EPERM;
err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
if (err < 0)
return err;
if (value != nfp_mutex_locked(interface))
return -EACCES;
err = nfp_cpp_writel(cpp, muw, mutex->address,
nfp_mutex_unlocked(interface));
if (err < 0)
return err;
mutex->depth = 0;
return 0;
}
/**
* nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
* @mutex: NFP CPP Mutex handle
*
* Return: 0 if the lock succeeded, -errno on failure
*/
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
struct nfp_cpp *cpp = mutex->cpp;
u32 key, value, tmp;
int err;
if (mutex->depth > 0) {
if (mutex->depth == MUTEX_DEPTH_MAX)
return -E2BIG;
mutex->depth++;
return 0;
}
/* Verify that the lock marker is not damaged */
err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
if (err < 0)
return err;
if (key != mutex->key)
return -EPERM;
/* Compare against the unlocked state, and if true,
* write the interface id into the top 16 bits, and
* mark as locked.
*/
value = nfp_mutex_locked(nfp_cpp_interface(cpp));
/* We use test_set_imm here, as it implies a read
* of the current state, and sets the bits in the
* bytemask of the command to 1s. Since the mutex
* is guaranteed to be 64-bit aligned, the bytemask
* of this 32-bit command is ensured to be 8'b00001111,
* which implies that the lower 4 bits will be set to
* ones regardless of the initial state.
*
* Since this is a 'Readback' operation, with no Pull
* data, we can treat this as a normal Push (read)
* atomic, which returns the original value.
*/
err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
if (err < 0)
return err;
/* Was it unlocked? */
if (nfp_mutex_is_unlocked(tmp)) {
/* The read value can only be 0x....0000 in the unlocked state.
* If there was another contending for this lock, then
* the lock state would be 0x....000f
*/
/* Write our owner ID into the lock
* While not strictly necessary, this helps with
* debug and bookkeeping.
*/
err = nfp_cpp_writel(cpp, muw, mutex->address, value);
if (err < 0)
return err;
mutex->depth = 1;
return 0;
}
/* Already locked by us? Success! */
if (tmp == value) {
mutex->depth = 1;
return 0;
}
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
struct nfp_cpp_mutex {
struct nfp_cpp *cpp;
int target;
u16 depth;
unsigned long long address;
u32 key;
};
static u32 nfp_mutex_locked(u16 interface)
{
return (u32)interface << 16 | 0x000f;
}
static u32 nfp_mutex_unlocked(u16 interface)
{
return (u32)interface << 16 | 0x0000;
}
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
}
static bool nfp_mutex_is_unlocked(u32 val)
{
return (val & 0xffff) == 0000;
}
/* If you need more than 65536 recursive locks, please rethink your code. */
#define NFP_MUTEX_DEPTH_MAX 0xffff
static int
nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
{
/* Not permitted on invalid interfaces */
if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
NFP_CPP_INTERFACE_TYPE_INVALID)
return -EINVAL;
/* Address must be 64-bit aligned */
if (address & 7)
return -EINVAL;
if (*target != NFP_CPP_TARGET_MU)
return -EINVAL;
return 0;
}
/**
* nfp_cpp_mutex_init() - Initialize a mutex location
* @cpp: NFP CPP handle
* @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
* @address: Offset into the address space of the NFP CPP target ID
* @key: Unique 32-bit value for this mutex
*
* The CPP target:address must point to a 64-bit aligned location, and
* will initialize 64 bits of data at the location.
*
* This creates the initial mutex state, as locked by this
* nfp_cpp_interface().
*
* This function should only be called when setting up
* the initial lock state upon boot-up of the system.
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
int target, unsigned long long address, u32 key)
{
const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
u16 interface = nfp_cpp_interface(cpp);
int err;
err = nfp_cpp_mutex_validate(interface, &target, address);
if (err)
return err;
err = nfp_cpp_writel(cpp, muw, address + 4, key);
if (err)
return err;
err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
if (err)
return err;
return 0;
}
/**
* nfp_cpp_mutex_alloc() - Create a mutex handle
* @cpp: NFP CPP handle
* @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
* @address: Offset into the address space of the NFP CPP target ID
* @key: 32-bit unique key (must match the key at this location)
*
* The CPP target:address must point to a 64-bit aligned location, and
* reserve 64 bits of data at the location for use by the handle.
*
* Only target/address pairs that point to entities that support the
* MU Atomic Engine's CmpAndSwap32 command are supported.
*
* Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
*/
struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
unsigned long long address, u32 key)
{
const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
u16 interface = nfp_cpp_interface(cpp);
struct nfp_cpp_mutex *mutex;
int err;
u32 tmp;
err = nfp_cpp_mutex_validate(interface, &target, address);
if (err)
return NULL;
err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
if (err < 0)
return NULL;
if (tmp != key)
return NULL;
mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
if (!mutex)
return NULL;
mutex->cpp = cpp;
mutex->target = target;
mutex->address = address;
mutex->key = key;
mutex->depth = 0;
return mutex;
}
/**
* nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
* @mutex: NFP CPP Mutex handle
*/
void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
{
kfree(mutex);
}
/**
* nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
* @mutex: NFP CPP Mutex handle
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
unsigned long warn_at = jiffies + 15 * HZ;
unsigned int timeout_ms = 1;
int err;
/* We can't use a waitqueue here, because the unlocker
* might be on a separate CPU.
*
* So just wait for now.
*/
for (;;) {
err = nfp_cpp_mutex_trylock(mutex);
if (err != -EBUSY)
break;
err = msleep_interruptible(timeout_ms);
if (err != 0)
return -ERESTARTSYS;
if (time_is_before_eq_jiffies(warn_at)) {
warn_at = jiffies + 60 * HZ;
nfp_warn(mutex->cpp,
"Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n",
mutex->depth,
mutex->target, mutex->address, mutex->key);
}
}
return err;
}
/**
* nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
* @mutex: NFP CPP Mutex handle
*
* Return: 0 on success, or -errno on failure
*/
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
struct nfp_cpp *cpp = mutex->cpp;
u32 key, value;
u16 interface;
int err;
interface = nfp_cpp_interface(cpp);
if (mutex->depth > 1) {
mutex->depth--;
return 0;
}
err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
if (err < 0)
return err;
if (key != mutex->key)
return -EPERM;
err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
if (err < 0)
return err;
if (value != nfp_mutex_locked(interface))
return -EACCES;
err = nfp_cpp_writel(cpp, muw, mutex->address,
nfp_mutex_unlocked(interface));
if (err < 0)
return err;
mutex->depth = 0;
return 0;
}
/**
* nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
* @mutex: NFP CPP Mutex handle
*
* Return: 0 if the lock succeeded, -errno on failure
*/
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
struct nfp_cpp *cpp = mutex->cpp;
u32 key, value, tmp;
int err;
if (mutex->depth > 0) {
if (mutex->depth == NFP_MUTEX_DEPTH_MAX)
return -E2BIG;
mutex->depth++;
return 0;
}
/* Verify that the lock marker is not damaged */
err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
if (err < 0)
return err;
if (key != mutex->key)
return -EPERM;
/* Compare against the unlocked state, and if true,
* write the interface id into the top 16 bits, and
* mark as locked.
*/
value = nfp_mutex_locked(nfp_cpp_interface(cpp));
/* We use test_set_imm here, as it implies a read
* of the current state, and sets the bits in the
* bytemask of the command to 1s. Since the mutex
* is guaranteed to be 64-bit aligned, the bytemask
* of this 32-bit command is ensured to be 8'b00001111,
* which implies that the lower 4 bits will be set to
* ones regardless of the initial state.
*
* Since this is a 'Readback' operation, with no Pull
* data, we can treat this as a normal Push (read)
* atomic, which returns the original value.
*/
err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
if (err < 0)
return err;
/* Was it unlocked? */
if (nfp_mutex_is_unlocked(tmp)) {
/* The read value can only be 0x....0000 in the unlocked state.
* If there was another contending for this lock, then
* the lock state would be 0x....000f
*/
/* Write our owner ID into the lock
* While not strictly necessary, this helps with
* debug and bookkeeping.
*/
err = nfp_cpp_writel(cpp, muw, mutex->address, value);
if (err < 0)
return err;
mutex->depth = 1;
return 0;
}
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
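Everything above is the mutex implementation relocated into nfp_mutex.c, minus the old per-cpp mutex cache: each caller now owns its own handle, which removes shared mutable state from the core lookup path. The lock word at target:address holds the owner's interface ID in the upper 16 bits and 0x000f/0x0000 (locked/unlocked) in the lower bits, with the key at address + 4. A typical caller, sketched with placeholder address and key values:

/* MY_LOCK_ADDR and MY_LOCK_KEY are placeholders; the location must have
 * been initialized once via nfp_cpp_mutex_init().
 */
static int do_locked_work(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *lock;
	int err;

	lock = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU,
				   MY_LOCK_ADDR, MY_LOCK_KEY);
	if (!lock)
		return -EINVAL;

	err = nfp_cpp_mutex_lock(lock);	/* polls trylock with 1 ms sleeps */
	if (err)
		goto err_free;

	/* ... critical section on the shared device resource ... */

	err = nfp_cpp_mutex_unlock(lock);
err_free:
	nfp_cpp_mutex_free(lock);	/* frees the handle, not the lock state */
	return err;
}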
@@ -209,9 +209,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
if ((*reg & mask) == val)
return 0;
err = msleep_interruptible(100);
if (err)
return err;
if (msleep_interruptible(25))
return -ERESTARTSYS;
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
@@ -228,7 +227,7 @@ *
*
* Return: 0 for success with no result
*
* 1..255 for NSP completion with a result code
* positive value for NSP completion with a result code
*
* -EAGAIN if the NSP is not yet present
* -ENODEV if the NSP is not a supported model
@@ -380,9 +379,10 @@ int nfp_nsp_wait(struct nfp_nsp *state)
if (err != -EAGAIN)
break;
err = msleep_interruptible(100);
if (err)
if (msleep_interruptible(25)) {
err = -ERESTARTSYS;
break;
}
if (time_after(start_time, wait_until)) {
err = -ETIMEDOUT;
......
@@ -45,6 +45,13 @@
#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"
#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
/* NFP Resource Table self-identifier */
#define NFP_RESOURCE_TBL_NAME "nfp.res"
#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
#define NFP_RESOURCE_ENTRY_NAME_SZ 8
/**
@@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
strncpy(name_pad, res->name, sizeof(name_pad));
/* Search for a matching entry */
key = NFP_RESOURCE_TBL_KEY;
if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
key = crc32_posix(name_pad, sizeof(name_pad));
if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
nfp_err(cpp, "Grabbing device lock not supported\n");
return -EOPNOTSUPP;
}
key = crc32_posix(name_pad, sizeof(name_pad));
for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
u64 addr = NFP_RESOURCE_TBL_BASE +
......
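With the reserved entry-0 name ("nfp.res", key 0) now rejected up front, every key this helper searches for is the CRC32-POSIX of the resource name zero-padded to 8 bytes. The derivation, as a standalone sketch reusing the crc32_posix() helper the lookup depends on:

/* Key derivation for a resource table entry (sketch). */
static u32 nfp_resource_table_key(const char *name)
{
	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};

	/* strncpy() zero-fills the tail when the name is shorter than
	 * 8 bytes, matching the lookup above.
	 */
	strncpy(name_pad, name, sizeof(name_pad));

	return crc32_posix(name_pad, sizeof(name_pad));
}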