Commit 09d36071 authored by David S. Miller

Merge branch 'nfp-dma-adjust_head-fixes'

Jakub Kicinski says:

====================
nfp: DMA flags, adjust head and fixes

This series takes advantage of Alex's DMA_ATTR_SKIP_CPU_SYNC to make
XDP packet modifications "correct" from the DMA API's point of view.  It
also allows us to parse the metadata before we run XDP at no additional
DMA sync cost.  That way we can get rid of the metadata memcpy, and
remove the last upstream user of bpf_prog->xdp_adjust_head.

David's patch adds a way to read capabilities from the management
firmware.

There are also two net-next fixes.  Patch 4 fixes what seems to be
the result of a botched rebase on my part.  Patch 5 corrects locking
when the state of Ethernet ports is being refreshed.

v3: move the sync from alloc func to the actual give to hw func
v2: sync rx buffers before giving them to the card (Alex)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
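
Before the diff itself, here is the pattern the cover letter refers to, sketched with the generic DMA API. This is an illustrative sketch only, not the driver's own helpers; the example_* names, the bidirectional mapping and the parameters are assumptions:

```c
#include <linux/dma-mapping.h>

/* Map the RX fragment once, skipping the implicit CPU sync; cache
 * maintenance then happens explicitly, and only for the bytes that
 * actually change ownership.
 */
static dma_addr_t example_map_rx_frag(struct device *dev, void *frag,
				      unsigned int headroom,
				      unsigned int data_sz)
{
	return dma_map_single_attrs(dev, frag + headroom, data_sz,
				    DMA_BIDIRECTIONAL,
				    DMA_ATTR_SKIP_CPU_SYNC);
}

/* Sync for the device right before the descriptor is given to the NIC
 * (the v3 change: sync at give-to-hw time, not at alloc time).
 */
static void example_give_to_hw(struct device *dev, dma_addr_t addr,
			       unsigned int data_sz)
{
	dma_sync_single_for_device(dev, addr, data_sz, DMA_BIDIRECTIONAL);
}

/* On completion, sync back only what the NIC wrote: prepended metadata
 * plus packet data, not the whole buffer.
 */
static void example_claim_from_hw(struct device *dev, dma_addr_t addr,
				  unsigned int written_len)
{
	dma_sync_single_for_cpu(dev, addr, written_len, DMA_BIDIRECTIONAL);
}
```

Because no implicit sync happens at map or unmap time, the CPU writes XDP makes into the buffer headroom stay valid under the DMA API; the driver decides exactly when, and over how many bytes, each direction of sync is performed.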
......@@ -9,6 +9,7 @@ nfp-objs := \
nfpcore/nfp_mutex.o \
nfpcore/nfp_nffw.o \
nfpcore/nfp_nsp.o \
nfpcore/nfp_nsp_cmds.o \
nfpcore/nfp_nsp_eth.o \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
......
......@@ -253,6 +253,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
struct nfp_nsp_identify *nspi;
struct nfp_nsp *nsp;
int err;
......@@ -269,6 +270,12 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
nspi = __nfp_nsp_identify(nsp);
if (nspi) {
dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
kfree(nspi);
}
err = nfp_fw_load(pdev, pf, nsp);
if (err < 0) {
kfree(pf->eth_tbl);
......
......@@ -284,6 +284,12 @@ struct nfp_net_rx_desc {
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
struct nfp_meta_parsed {
u32 hash_type;
u32 hash;
u32 mark;
};
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
......@@ -813,7 +819,8 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new);
bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
void nfp_net_refresh_port_config(struct nfp_net *nn);
int nfp_net_refresh_eth_port(struct nfp_net *nn);
void nfp_net_refresh_port_table(struct nfp_net *nn);
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
......
......@@ -87,16 +87,31 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
return dma_map_single(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
dma_sync_single_for_device(dp->dev, dma_addr,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir);
}
static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
dma_unmap_single(dp->dev, dma_addr,
dma_unmap_single_attrs(dp->dev, dma_addr,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir);
dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
unsigned int len)
{
dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
len, dp->rx_dma_dir);
}
/* Firmware reconfig
......@@ -1208,6 +1223,8 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
nfp_net_dma_sync_dev_rx(dp, dma_addr);
/* Stash SKB and DMA address away */
rx_ring->rxbufs[wr_idx].frag = frag;
rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
......@@ -1385,7 +1402,8 @@ static void nfp_net_rx_csum(struct nfp_net_dp *dp,
}
}
static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
static void
nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
unsigned int type, __be32 *hash)
{
if (!(netdev->features & NETIF_F_RXHASH))
......@@ -1395,16 +1413,18 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
meta->hash_type = PKT_HASH_TYPE_L3;
break;
default:
skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
meta->hash_type = PKT_HASH_TYPE_L4;
break;
}
meta->hash = get_unaligned_be32(hash);
}
static void
nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, struct nfp_net_rx_desc *rxd)
{
struct nfp_net_rx_hash *rx_hash = data;
......@@ -1412,12 +1432,12 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
return;
nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
&rx_hash->hash);
}
static void *
nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, int meta_len)
{
u32 meta_info;
......@@ -1429,13 +1449,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
switch (meta_info & NFP_NET_META_FIELD_MASK) {
case NFP_NET_META_HASH:
meta_info >>= NFP_NET_META_FIELD_SIZE;
nfp_net_set_hash(netdev, skb,
nfp_net_set_hash(netdev, meta,
meta_info & NFP_NET_META_FIELD_MASK,
(__be32 *)data);
data += 4;
break;
case NFP_NET_META_MARK:
skb->mark = get_unaligned_be32(data);
meta->mark = get_unaligned_be32(data);
data += 4;
break;
default:
......@@ -1569,13 +1589,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
unsigned int meta_len, data_len, data_off, pkt_len;
u8 meta_prepend[NFP_NET_MAX_PREPEND];
unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
dma_addr_t new_dma_addr;
void *new_frag;
u8 *meta;
idx = rx_ring->rd_p & (rx_ring->cnt - 1);
......@@ -1588,6 +1607,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/
dma_rmb();
memset(&meta, 0, sizeof(meta));
rx_ring->rd_p++;
pkts_polled++;
......@@ -1608,11 +1629,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
data_off = NFP_NET_RX_BUF_HEADROOM + meta_len;
pkt_off += meta_len;
else
data_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_offset;
data_off += dp->rx_dma_off;
pkt_off += dp->rx_offset;
meta_off = pkt_off - meta_len;
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
......@@ -1620,9 +1642,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync);
/* Pointer to start of metadata */
meta = rxbuf->frag + data_off - meta_len;
if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
(dp->rx_offset && meta_len > dp->rx_offset))) {
nn_dp_warn(dp, "oversized RX packet metadata %u\n",
......@@ -1631,6 +1650,26 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
data_len);
if (!dp->chained_metadata_format) {
nfp_net_set_hash_desc(dp->netdev, &meta,
rxbuf->frag + meta_off, rxd);
} else if (meta_len) {
void *end;
end = nfp_net_parse_meta(dp->netdev, &meta,
rxbuf->frag + meta_off,
meta_len);
if (unlikely(end != rxbuf->frag + pkt_off)) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
continue;
}
}
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp)) {
unsigned int dma_off;
......@@ -1638,24 +1677,14 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
int act;
hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
dma_sync_single_for_cpu(dp->dev, rxbuf->dma_addr,
dma_off + pkt_len,
DMA_BIDIRECTIONAL);
/* Move prepend out of the way */
if (xdp_prog->xdp_adjust_head) {
memcpy(meta_prepend, meta, meta_len);
meta = meta_prepend;
}
act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
&data_off, &pkt_len);
&pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
dma_off = data_off - NFP_NET_RX_BUF_HEADROOM;
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
dma_off,
......@@ -1689,22 +1718,11 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
skb_reserve(skb, data_off);
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
if (!dp->chained_metadata_format) {
nfp_net_set_hash_desc(dp->netdev, skb, meta, rxd);
} else if (meta_len) {
void *end;
end = nfp_net_parse_meta(dp->netdev, skb, meta,
meta_len);
if (unlikely(end != meta + meta_len)) {
nn_dp_warn(dp, "invalid RX packet metadata\n");
nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
}
skb->mark = meta.mark;
skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, dp->netdev);
......@@ -2147,7 +2165,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
*/
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
u32 new_ctrl, update = 0;
u32 bufsz, new_ctrl, update = 0;
unsigned int r;
int err;
......@@ -2181,8 +2199,9 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
nfp_net_write_mac_addr(nn);
nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA);
bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
......
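
Pieced together from the nfp_net_common.c hunks above, the metadata parser now fills struct nfp_meta_parsed instead of writing to the skb, which is what lets it run before XDP. A sketch of the resulting function for reference; the while loop and the initial meta_info read are unchanged context that the hunks do not show, so they are assumed here:

```c
static void *
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, int meta_len)
{
	u32 meta_info;

	/* The first 32-bit word holds a chain of 4-bit field type codes. */
	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			/* Hash consumes an extra nibble: the RSS hash type. */
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_net_set_hash(netdev, meta,
					 meta_info & NFP_NET_META_FIELD_MASK,
					 (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		default:
			return NULL;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data;
}
```

The RX path then checks that the returned pointer equals rxbuf->frag + pkt_off, and only copies meta.mark and meta.hash into the skb after XDP has passed the packet, as the hunks above show.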
......@@ -211,10 +211,15 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
return 0;
/* Use link speed from ETH table if available, otherwise try the BAR */
if (nn->eth_port && nfp_net_link_changed_read_clear(nn))
nfp_net_refresh_port_config(nn);
/* Separate if - on FW error the port could've disappeared from table */
if (nn->eth_port) {
int err;
if (nfp_net_link_changed_read_clear(nn)) {
err = nfp_net_refresh_eth_port(nn);
if (err)
return err;
}
cmd->base.port = nn->eth_port->port_type;
cmd->base.speed = nn->eth_port->speed;
cmd->base.duplex = DUPLEX_FULL;
......@@ -273,7 +278,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (err > 0)
return 0; /* no change */
nfp_net_refresh_port_config(nn);
nfp_net_refresh_port_table(nn);
return err;
......
......@@ -176,13 +176,13 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
}
static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_pf *pf, unsigned int id)
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
int i;
for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
if (pf->eth_tbl->ports[i].eth_index == id)
return &pf->eth_tbl->ports[i];
for (i = 0; eth_tbl && i < eth_tbl->count; i++)
if (eth_tbl->ports[i].eth_index == id)
return &eth_tbl->ports[i];
return NULL;
}
......@@ -367,7 +367,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
eth_port = nfp_net_find_port(pf, i);
eth_port = nfp_net_find_port(pf->eth_tbl, i);
if (eth_port && eth_port->override_changed) {
nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
} else {
......@@ -485,6 +485,7 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
{
struct nfp_pf *pf = container_of(work, struct nfp_pf,
port_refresh_work);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
mutex_lock(&pf->port_lock);
......@@ -493,6 +494,27 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
if (list_empty(&pf->ports))
goto out;
list_for_each_entry(nn, &pf->ports, port_list)
nfp_net_link_changed_read_clear(nn);
eth_table = nfp_eth_read_ports(pf->cpp);
if (!eth_table) {
nfp_err(pf->cpp, "Error refreshing port config!\n");
goto out;
}
rtnl_lock();
list_for_each_entry(nn, &pf->ports, port_list) {
if (!nn->eth_port)
continue;
nn->eth_port = nfp_net_find_port(eth_table,
nn->eth_port->eth_index);
}
rtnl_unlock();
kfree(pf->eth_tbl);
pf->eth_tbl = eth_table;
list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
if (!nn->eth_port) {
nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
......@@ -517,31 +539,36 @@ static void nfp_net_refresh_netdevs(struct work_struct *work)
mutex_unlock(&pf->port_lock);
}
void nfp_net_refresh_port_config(struct nfp_net *nn)
void nfp_net_refresh_port_table(struct nfp_net *nn)
{
struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
struct nfp_eth_table *old_table;
ASSERT_RTNL();
schedule_work(&pf->port_refresh_work);
}
old_table = pf->eth_tbl;
int nfp_net_refresh_eth_port(struct nfp_net *nn)
{
struct nfp_eth_table_port *eth_port;
struct nfp_eth_table *eth_table;
list_for_each_entry(nn, &pf->ports, port_list)
nfp_net_link_changed_read_clear(nn);
eth_table = nfp_eth_read_ports(nn->cpp);
if (!eth_table) {
nn_err(nn, "Error refreshing port state table!\n");
return -EIO;
}
pf->eth_tbl = nfp_eth_read_ports(pf->cpp);
if (!pf->eth_tbl) {
pf->eth_tbl = old_table;
nfp_err(pf->cpp, "Error refreshing port config!\n");
return;
eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
if (!eth_port) {
nn_err(nn, "Error finding state of the port!\n");
kfree(eth_table);
return -EIO;
}
list_for_each_entry(nn, &pf->ports, port_list)
nn->eth_port = nfp_net_find_port(pf, nn->eth_port->eth_index);
memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
kfree(old_table);
kfree(eth_table);
schedule_work(&pf->port_refresh_work);
return 0;
}
/*
......
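
The nfp_net_main.c changes above split port refresh into two paths with a clear lock order: callers that already hold RTNL either refresh a single port synchronously (nfp_net_refresh_eth_port()) or merely schedule the full table refresh, and the worker then takes the driver's port_lock first and RTNL second. A generic sketch of that deferral pattern; the example_* names are illustrative, not the driver's:

```c
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct example_pf {
	struct mutex port_lock;		/* protects the port list/table */
	struct work_struct refresh_work;
};

/* The heavyweight refresh runs from a workqueue, never inline in the
 * RTNL-holding caller, so it is free to take port_lock and then RTNL
 * in its own, fixed order.
 */
static void example_refresh_worker(struct work_struct *work)
{
	struct example_pf *pf = container_of(work, struct example_pf,
					     refresh_work);

	mutex_lock(&pf->port_lock);
	rtnl_lock();
	/* ... re-read the port table and update per-netdev state ... */
	rtnl_unlock();
	mutex_unlock(&pf->port_lock);
}

/* Called from contexts that already hold RTNL (e.g. ethtool ops):
 * just defer the work, do not try to grab port_lock inline.
 */
static void example_refresh_port_table(struct example_pf *pf)
{
	ASSERT_RTNL();
	schedule_work(&pf->refresh_work);
}
```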
......@@ -63,6 +63,7 @@ void nfp_nsp_config_clear_state(struct nfp_nsp *state);
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size);
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
/* Implemented in nfp_resource.c */
......
......@@ -93,6 +93,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
__MAX_SPCODE,
};
......@@ -493,3 +494,9 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state,
return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
NULL, 0);
}
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
buf, size);
}
......@@ -147,4 +147,28 @@ int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
/**
* struct nfp_nsp_identify - NSP static information
* @version: opaque version string
* @flags: version flags
* @br_primary: branch id of primary bootloader
* @br_secondary: branch id of secondary bootloader
* @br_nsp: branch id of NSP
* @primary: version of primary bootloader
* @secondary: version id of secondary bootloader
* @nsp: version id of NSP
*/
struct nfp_nsp_identify {
char version[40];
u8 flags;
u8 br_primary;
u8 br_secondary;
u8 br_nsp;
u16 primary;
u16 secondary;
u16 nsp;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
#endif
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "nfp.h"
#include "nfp_nsp.h"
struct nsp_identify {
u8 version[40];
u8 flags;
u8 br_primary;
u8 br_secondary;
u8 br_nsp;
__le16 primary;
__le16 secondary;
__le16 nsp;
__le16 reserved;
};
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
{
struct nfp_nsp_identify *nspi = NULL;
struct nsp_identify *ni;
int ret;
if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
return NULL;
ni = kzalloc(sizeof(*ni), GFP_KERNEL);
if (!ni)
return NULL;
ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
if (ret < 0) {
nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n",
ret);
goto exit_free;
}
nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
if (!nspi)
goto exit_free;
memcpy(nspi->version, ni->version, sizeof(nspi->version));
nspi->version[sizeof(nspi->version) - 1] = '\0';
nspi->flags = ni->flags;
nspi->br_primary = ni->br_primary;
nspi->br_secondary = ni->br_secondary;
nspi->br_nsp = ni->br_nsp;
nspi->primary = le16_to_cpu(ni->primary);
nspi->secondary = le16_to_cpu(ni->secondary);
nspi->nsp = le16_to_cpu(ni->nsp);
exit_free:
kfree(ni);
return nspi;
}
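
Finally, a usage sketch for the new identify helper, mirroring the nfp_main.c hunk near the top of this commit; the wrapper name and the header includes are assumptions for illustration:

```c
#include <linux/pci.h>
#include <linux/slab.h>

#include "nfp.h"
#include "nfp_nsp.h"

/* Read the NSP static information once, e.g. at probe time, and log the
 * BSP version.  __nfp_nsp_identify() returns NULL for NSPs older than
 * ABI minor 15 or on read failure, which is treated as non-fatal here.
 */
static void example_log_bsp_version(struct pci_dev *pdev, struct nfp_nsp *nsp)
{
	struct nfp_nsp_identify *nspi;

	nspi = __nfp_nsp_identify(nsp);
	if (!nspi)
		return;

	dev_info(&pdev->dev, "BSP: %s\n", nspi->version);
	kfree(nspi);
}
```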