Commit 884fa4f3 authored by Doug Ledford

Merge branches 'chelsio', 'debug-cleanup', 'hns' and 'i40iw' into merge-test

......@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
-dev_err(&device->dev, "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
......
......@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
int err = 0;
table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-if (!table) {
-pr_warn("failed to allocate ib gid cache for %s\n",
-ib_dev->name);
+if (!table)
return -ENOMEM;
-}
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
u8 rdma_port = port + rdma_start_port(ib_dev);
......@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
pr_warn("Couldn't allocate cache for %s\n", device->name);
return -ENOMEM;
err = -ENOMEM;
goto free;
}
err = gid_table_setup_one(device);
if (err)
-/* Allocated memory will be cleaned in the release function */
-return err;
+goto free;
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
ib_cache_update(device, p + rdma_start_port(device));
......@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
err:
gid_table_cleanup_one(device);
+free:
+kfree(device->cache.pkey_cache);
+kfree(device->cache.lmc_cache);
return err;
}
......
......@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
unsigned long flags;
context = kmalloc(sizeof *context, GFP_KERNEL);
-if (!context) {
-pr_warn("Couldn't allocate client context for %s/%s\n",
-device->name, client->name);
+if (!context)
return -ENOMEM;
-}
context->client = client;
context->data = NULL;
......
......@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
-pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
......
......@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
}
rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
if (!rem_info) {
pr_err("%s: Unable to allocate a remote info\n", __func__);
ret = -ENOMEM;
return ret;
}
......
......@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
sizeof(struct hlist_head), GFP_KERNEL);
if (!iwpm_hash_bucket) {
ret = -ENOMEM;
pr_err("%s Unable to create mapinfo hash table\n", __func__);
goto init_exit;
}
iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
......@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
if (!iwpm_reminfo_bucket) {
kfree(iwpm_hash_bucket);
ret = -ENOMEM;
pr_err("%s Unable to create reminfo hash table\n", __func__);
goto init_exit;
}
}
......@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
if (!iwpm_valid_client(nl_client))
return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
-if (!map_info) {
-pr_err("%s: Unable to allocate a mapping info\n", __func__);
+if (!map_info)
return -ENOMEM;
-}
memcpy(&map_info->local_sockaddr, local_sockaddr,
sizeof(struct sockaddr_storage));
memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
......@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
-if (!nlmsg_request) {
-pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+if (!nlmsg_request)
return NULL;
-}
spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
......
......@@ -816,7 +816,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) {
ret = -ENOMEM;
dev_err(&device->dev, "No memory for ib_mad_local_private\n");
goto out;
}
local->mad_priv = NULL;
......@@ -824,7 +823,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
dev_err(&device->dev, "No memory for local response MAD\n");
kfree(local);
goto out;
}
......@@ -947,9 +945,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
if (!seg) {
-dev_err(&send_buf->mad_agent->device->dev,
-"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
-sizeof (*seg) + seg_size, gfp_mask);
free_send_rmpp_list(send_wr);
return -ENOMEM;
}
......@@ -1362,12 +1357,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
/* Allocate management method table */
*method = kzalloc(sizeof **method, GFP_ATOMIC);
-if (!*method) {
-pr_err("No memory for ib_mad_mgmt_method_table\n");
-return -ENOMEM;
-}
-return 0;
+return (*method) ? 0 : (-ENOMEM);
}
/*
......@@ -1458,8 +1448,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) {
-dev_err(&agent_priv->agent.device->dev,
-"No memory for ib_mad_mgmt_class_table\n");
ret = -ENOMEM;
goto error1;
}
......@@ -1524,22 +1512,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
if (!*vendor_table) {
/* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
-if (!vendor) {
-dev_err(&agent_priv->agent.device->dev,
-"No memory for ib_mad_mgmt_vendor_class_table\n");
+if (!vendor)
goto error1;
-}
*vendor_table = vendor;
}
if (!(*vendor_table)->vendor_class[vclass]) {
/* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
-if (!vendor_class) {
-dev_err(&agent_priv->agent.device->dev,
-"No memory for ib_mad_mgmt_vendor_class\n");
+if (!vendor_class)
goto error2;
-}
(*vendor_table)->vendor_class[vclass] = vendor_class;
}
......@@ -2238,11 +2220,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
mad_size = recv->mad_size;
response = alloc_mad_private(mad_size, GFP_KERNEL);
-if (!response) {
-dev_err(&port_priv->device->dev,
-"%s: no memory for response buffer\n", __func__);
+if (!response)
goto out;
-}
if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
......@@ -2869,8 +2848,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
if (!mad_priv) {
-dev_err(&qp_info->port_priv->device->dev,
-"No memory for receive buffer\n");
ret = -ENOMEM;
break;
}
......@@ -2961,11 +2938,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
-if (!attr) {
-dev_err(&port_priv->device->dev,
-"Couldn't kmalloc ib_qp_attr\n");
+if (!attr)
return -ENOMEM;
-}
ret = ib_find_pkey(port_priv->device, port_priv->port_num,
IB_DEFAULT_PKEY_FULL, &pkey_index);
......@@ -3135,10 +3109,8 @@ static int ib_mad_port_open(struct ib_device *device,
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
-if (!port_priv) {
-dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+if (!port_priv)
return -ENOMEM;
-}
port_priv->device = device;
port_priv->port_num = port_num;
......
......@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
for_ifa(in_dev) {
struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-if (!entry) {
-pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+if (!entry)
continue;
-}
entry->ip.sin_family = AF_INET;
entry->ip.sin_addr.s_addr = ifa->ifa_address;
list_add_tail(&entry->list, &sin_list);
......@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-if (!entry) {
-pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+if (!entry)
continue;
-}
entry->sin6.sin6_family = AF_INET6;
entry->sin6.sin6_addr = ifp->addr;
......@@ -459,10 +456,8 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
struct upper_list *entry = kmalloc(sizeof(*entry),
GFP_ATOMIC);
-if (!entry) {
-pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+if (!entry)
continue;
-}
list_add_tail(&entry->list, &upper_list);
dev_hold(upper);
......@@ -555,10 +550,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
struct netdev_event_work *ndev_work =
kmalloc(sizeof(*ndev_work), GFP_KERNEL);
-if (!ndev_work) {
-pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+if (!ndev_work)
return NOTIFY_DONE;
-}
memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
......@@ -692,10 +685,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
}
work = kmalloc(sizeof(*work), GFP_ATOMIC);
-if (!work) {
-pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+if (!work)
return NOTIFY_DONE;
-}
INIT_WORK(&work->work, update_gid_event_work_handler);
......
......@@ -1104,8 +1104,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
-if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+if (!ib_safe_file_access(filp)) {
+pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+task_tgid_vnr(current), current->comm);
return -EACCES;
+}
if (len < sizeof(hdr))
return -EINVAL;
......
......@@ -1584,8 +1584,11 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
-if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+if (!ib_safe_file_access(filp)) {
+pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+task_tgid_vnr(current), current->comm);
return -EACCES;
+}
if (len < sizeof(hdr))
return -EINVAL;
......
......@@ -749,8 +749,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
-if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+if (!ib_safe_file_access(filp)) {
+pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+task_tgid_vnr(current), current->comm);
return -EACCES;
+}
if (count < sizeof hdr)
return -EINVAL;
......
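/* Editorial aside -- a minimal sketch of the check all three write()
 * handlers above rely on, as it looked in this era (assumed, not part of
 * this diff): the caller must still run with the credentials and
 * address-space setup the file descriptor was opened under.
 */
static inline bool ib_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
}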
......@@ -45,10 +45,9 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
int size = 32;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-if (!m) {
-PDBG("%s couldn't allocate memory.\n", __func__);
+if (!m)
return;
-}
m->mem_id = MEM_PMRX;
m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
m->len = size;
......@@ -82,10 +81,9 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
size = npages * sizeof(u64);
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-if (!m) {
-PDBG("%s couldn't allocate memory.\n", __func__);
+if (!m)
return;
-}
m->mem_id = MEM_PMRX;
m->addr = pbl_addr;
m->len = size;
......@@ -144,10 +142,9 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-if (!m) {
-PDBG("%s couldn't allocate memory.\n", __func__);
+if (!m)
return;
-}
m->mem_id = MEM_PMRX;
m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
m->len = size;
......@@ -177,10 +174,9 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-if (!m) {
-PDBG("%s couldn't allocate memory.\n", __func__);
+if (!m)
return;
-}
m->mem_id = MEM_CM;
m->addr = hwtid * size;
m->len = size;
......
......@@ -843,8 +843,6 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
if (rdev->wr_log) {
rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
atomic_set(&rdev->wr_log_idx, 0);
-} else {
-pr_err(MOD "error allocating wr_log. Logging disabled\n");
}
}
......@@ -1426,8 +1424,6 @@ static void recover_queues(struct uld_ctx *ctx)
qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
if (!qp_list.qps) {
-printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
-pci_name(ctx->lldi.pdev));
spin_unlock_irq(&ctx->dev->lock);
return;
}
......
......@@ -2053,7 +2053,6 @@ int init_credit_return(struct hfi1_devdata *dd)
sizeof(struct credit_return_base),
GFP_KERNEL);
if (!dd->cr_base) {
-dd_dev_err(dd, "Unable to allocate credit return base\n");
ret = -ENOMEM;
goto done;
}
......
......@@ -61,9 +61,10 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
return ret;
}
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+int rr)
{
-hns_roce_bitmap_free_range(bitmap, obj, 1);
+hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
......@@ -106,7 +107,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-unsigned long obj, int cnt)
+unsigned long obj, int cnt,
+int rr)
{
int i;
......@@ -116,7 +118,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
for (i = 0; i < cnt; i++)
clear_bit(obj + i, bitmap->table);
-bitmap->last = min(bitmap->last, obj);
+if (!rr)
+bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
spin_unlock(&bitmap->lock);
......
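/* Editorial sketch of the new `rr` (round-robin) parameter: freeing with
 * BITMAP_RR deliberately does not rewind bitmap->last, so the allocator
 * keeps scanning forward and the freed object is not handed out again
 * immediately -- while BITMAP_NO_RR allows instant reuse:
 *
 *	hns_roce_bitmap_free(&pd_bitmap, pdn, BITMAP_NO_RR);	// pdn may be reused at once
 *	hns_roce_bitmap_free_range(&qp_table->bitmap, qpn, 1, BITMAP_RR); // qpn recycled late
 */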
......@@ -216,10 +216,10 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
goto out;
/*
-* It is timeout when wait_for_completion_timeout return 0
-* The return value is the time limit set in advance
-* how many seconds showing
-*/
+* It is timeout when wait_for_completion_timeout return 0
+* The return value is the time limit set in advance
+* how many seconds showing
+*/
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
......
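/* Editorial sketch of the pattern the comment above describes (names
 * illustrative): wait_for_completion_timeout() returns 0 on timeout and
 * the number of remaining jiffies otherwise.
 */
unsigned long left;

left = wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout));
if (!left)
	return -EBUSY;		/* timed out */
/* otherwise the command completed with `left` jiffies to spare */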
......@@ -34,6 +34,7 @@
#define _HNS_ROCE_CMD_H
#define HNS_ROCE_MAILBOX_SIZE 4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
/* TPT commands */
......@@ -57,17 +58,6 @@ enum {
HNS_ROCE_CMD_QUERY_QP = 0x22,
};
-enum {
-HNS_ROCE_CMD_TIME_CLASS_A = 10000,
-HNS_ROCE_CMD_TIME_CLASS_B = 10000,
-HNS_ROCE_CMD_TIME_CLASS_C = 10000,
-};
-struct hns_roce_cmd_mailbox {
-void *buf;
-dma_addr_t dma;
-};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout);
......
......@@ -57,6 +57,32 @@
#define roce_set_bit(origin, shift, val) \
roce_set_field((origin), (1ul << (shift)), (shift), (val))
/*
* roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon
* SOC, check if a is less than b.
* @a: hardware index value
* @b: hardware index value
* @bits: the number of bits of a and b, range: 0~31.
*
* Hardware index increases continuously till max value, and then restart
* from zero, again and again. Because the bits of reg field is often
* limited, the reg field can only hold the low bits of the hardware index
* in hisilicon SOC.
* In some scenes we need to compare two values(a,b) getted from two reg
* fields in this driver, for example:
* If a equals 0xfffe, b equals 0x1 and bits equals 16, we think b has
* incresed from 0xffff to 0x1 and a is less than b.
* If a equals 0xfffe, b equals 0x0xf001 and bits equals 16, we think a
* is bigger than b.
*
* Return true on a less than b, otherwise false.
*/
#define roce_hw_index_mask(bits) ((1ul << (bits)) - 1)
#define roce_hw_index_shift(bits) (32 - (bits))
#define roce_hw_index_cmp_lt(a, b, bits) \
((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
roce_hw_index_shift(bits)) < 0)
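/* Worked examples for the comparison above (editorial, 16-bit indices):
 *   roce_hw_index_cmp_lt(0xfffe, 0x1, 16)    -> true
 *     (0xfffe - 0x1) & 0xffff = 0xfffd; 0xfffd << 16 = 0xfffd0000,
 *     negative as an int, so b has wrapped past a.
 *   roce_hw_index_cmp_lt(0xfffe, 0xf001, 16) -> false
 *     (0xfffe - 0xf001) & 0xffff = 0x0ffd; 0x0ffd << 16 stays positive.
 */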
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
......@@ -245,16 +271,26 @@
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
#define ROCEE_SDB_PTR_CMP_BITS 28
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
(((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
#define ROCEE_SDB_CNT_CMP_BITS 16
#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
#define ROCEE_HW_VERSION_REG 0x8
#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
......@@ -318,7 +354,11 @@
#define ROCEE_SDB_ISSUE_PTR_REG 0x758
#define ROCEE_SDB_SEND_PTR_REG 0x75C
#define ROCEE_CAEP_CQE_WCMD_EMPTY 0x850
#define ROCEE_SCAEP_WR_CQE_CNT 0x8D0
#define ROCEE_SDB_INV_CNT_REG 0x9A4
#define ROCEE_SDB_RETRY_CNT_REG 0x9AC
#define ROCEE_TSP_BP_ST_REG 0x9EC
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
......
......@@ -35,7 +35,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
......@@ -77,7 +77,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
unsigned long cq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
......@@ -166,7 +166,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
err_out:
-hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
return ret;
}
......@@ -176,11 +176,10 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
-HNS_ROCE_CMD_TIME_CLASS_A);
+HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
-struct hns_roce_cq *hr_cq)
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev;
......@@ -204,7 +203,7 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
spin_unlock_irq(&cq_table->lock);
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
......@@ -349,6 +348,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_mtt;
}
/*
* For the QP created by kernel space, tptr value should be initialized
* to zero; For the QP created by user space, it will cause synchronous
* problems if tptr is set to zero here, so we initialze it in user
* space.
*/
if (!context)
*hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */
hr_cq->comp = hns_roce_ib_cq_comp;
hr_cq->event = hns_roce_ib_cq_event;
......@@ -383,19 +391,25 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+int ret = 0;
-hns_roce_free_cq(hr_dev, hr_cq);
-hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+if (hr_dev->hw->destroy_cq) {
+ret = hr_dev->hw->destroy_cq(ib_cq);
+} else {
+hns_roce_free_cq(hr_dev, hr_cq);
+hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-if (ib_cq->uobject)
-ib_umem_release(hr_cq->umem);
-else
-/* Free the buff of stored cq */
-hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+if (ib_cq->uobject)
+ib_umem_release(hr_cq->umem);
+else
+/* Free the buff of stored cq */
+hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+ib_cq->cqe);
-kfree(hr_cq);
+kfree(hr_cq);
+}
-return 0;
+return ret;
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
......
......@@ -37,6 +37,8 @@
#define DRV_NAME "hns_roce"
#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
#define MAC_ADDR_OCTET_NUM 6
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
......@@ -54,6 +56,12 @@
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
#define HNS_ROCE_MAX_IRQ_NUM 34
#define HNS_ROCE_COMP_VEC_NUM 32
......@@ -70,6 +78,9 @@
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
#define BITMAP_NO_RR 0
#define BITMAP_RR 1
#define MR_TYPE_MR 0x00
#define MR_TYPE_DMA 0x03
......@@ -196,9 +207,9 @@ struct hns_roce_bitmap {
/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
/* Every bit repesent to a partner free/used status in bitmap */
/*
-* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
-* Bit = 1 represent to idle and available; bit = 0: not available
-*/
+* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
+* Bit = 1 represent to idle and available; bit = 0: not available
+*/
struct hns_roce_buddy {
/* Members point to every order level bitmap */
unsigned long **bits;
......@@ -296,7 +307,7 @@ struct hns_roce_cq {
u32 cq_depth;
u32 cons_index;
void __iomem *cq_db_l;
-void __iomem *tptr_addr;
+u16 *tptr_addr;
unsigned long cqn;
u32 vector;
atomic_t refcount;
......@@ -360,29 +371,34 @@ struct hns_roce_cmdq {
struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
-* Event mode: cmd register mutex protection,
-* ensure to not exceed max_cmds and user use limit region
-*/
+* Event mode: cmd register mutex protection,
+* ensure to not exceed max_cmds and user use limit region
+*/
struct semaphore event_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
struct hns_roce_cmd_context *context;
/*
-* Result of get integer part
-* which max_comds compute according a power of 2
-*/
+* Result of get integer part
+* which max_comds compute according a power of 2
+*/
u16 token_mask;
/*
-* Process whether use event mode, init default non-zero
-* After the event queue of cmd event ready,
-* can switch into event mode
-* close device, switch into poll mode(non event mode)
-*/
+* Process whether use event mode, init default non-zero
+* After the event queue of cmd event ready,
+* can switch into event mode
+* close device, switch into poll mode(non event mode)
+*/
u8 use_events;
u8 toggle;
};
struct hns_roce_cmd_mailbox {
void *buf;
dma_addr_t dma;
};
struct hns_roce_dev;
struct hns_roce_qp {
......@@ -424,8 +440,6 @@ struct hns_roce_ib_iboe {
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
-/* 16 GID is shared by 6 port in v1 engine. */
-union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
......@@ -519,6 +533,8 @@ struct hns_roce_hw {
struct ib_recv_wr **bad_recv_wr);
int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
void *priv;
};
......@@ -553,6 +569,8 @@ struct hns_roce_dev {
int cmd_mod;
int loop_idc;
dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/
struct hns_roce_hw *hw;
};
......@@ -657,7 +675,8 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 resetrved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
......@@ -665,7 +684,8 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-unsigned long obj, int cnt);
+unsigned long obj, int cnt,
+int rr);
struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
......@@ -681,6 +701,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
......@@ -717,6 +741,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
......
......@@ -371,9 +371,9 @@ static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
int i = 0;
/**
-* AEQ overflow ECC mult bit err CEQ overflow alarm
-* must clear interrupt, mask irq, clear irq, cancel mask operation
-*/
+* AEQ overflow ECC mult bit err CEQ overflow alarm
+* must clear interrupt, mask irq, clear irq, cancel mask operation
+*/
aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
if (roce_get_bit(aeshift_val,
......
......@@ -80,9 +80,9 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
--order;
/*
-* Alloc memory one time. If failed, don't alloc small block
-* memory, directly return fail.
-*/
+* Alloc memory one time. If failed, don't alloc small block
+* memory, directly return fail.
+*/
mem = &chunk->mem[chunk->npages];
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
......
......@@ -58,6 +58,7 @@
#define HNS_ROCE_V1_PHY_UAR_NUM 8
#define HNS_ROCE_V1_GID_NUM 16
#define HNS_ROCE_V1_RESV_QP 8
#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
......@@ -102,8 +103,22 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
#define HNS_ROCE_V1_DB_WAIT_OK 0
#define HNS_ROCE_V1_DB_STAGE1 1
#define HNS_ROCE_V1_DB_STAGE2 2
#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS 10000
#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS 20
#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
#define HNS_ROCE_V1_TPTR_BUF_SIZE \
(HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
......@@ -140,6 +155,7 @@
#define SQ_PSN_SHIFT 8
#define QKEY_VAL 0x80010000
#define SDB_INV_CNT_OFFSET 8
#define SDB_ST_CMP_VAL 8
struct hns_roce_cq_context {
u32 cqc_byte_4;
......@@ -436,6 +452,8 @@ struct hns_roce_ud_send_wqe {
#define UD_SEND_WQE_U32_8_DMAC_5_M \
(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
......@@ -480,13 +498,17 @@ struct hns_roce_sqp_context {
u32 qp1c_bytes_12;
u32 qp1c_bytes_16;
u32 qp1c_bytes_20;
-u32 qp1c_bytes_28;
u32 cur_rq_wqe_ba_l;
+u32 qp1c_bytes_28;
u32 qp1c_bytes_32;
u32 cur_sq_wqe_ba_l;
u32 qp1c_bytes_40;
};
+#define QP1C_BYTES_4_QP_STATE_S 0
+#define QP1C_BYTES_4_QP_STATE_M \
+(((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
......@@ -952,6 +974,10 @@ struct hns_roce_sq_db {
#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
#define SQ_DOORBELL_U32_4_SL_S 16
#define SQ_DOORBELL_U32_4_SL_M \
(((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
#define SQ_DOORBELL_U32_4_PORT_S 18
#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
......@@ -979,12 +1005,58 @@ struct hns_roce_bt_table {
struct hns_roce_buf_list cqc_buf;
};
struct hns_roce_tptr_table {
struct hns_roce_buf_list tptr_buf;
};
struct hns_roce_qp_work {
struct work_struct work;
struct ib_device *ib_dev;
struct hns_roce_qp *qp;
u32 db_wait_stage;
u32 sdb_issue_ptr;
u32 sdb_inv_cnt;
u32 sche_cnt;
};
struct hns_roce_des_qp {
struct workqueue_struct *qp_wq;
int requeue_flag;
};
struct hns_roce_mr_free_work {
struct work_struct work;
struct ib_device *ib_dev;
struct completion *comp;
int comp_flag;
void *mr;
};
struct hns_roce_recreate_lp_qp_work {
struct work_struct work;
struct ib_device *ib_dev;
struct completion *comp;
int comp_flag;
};
struct hns_roce_free_mr {
struct workqueue_struct *free_mr_wq;
struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
struct hns_roce_cq *mr_free_cq;
struct hns_roce_pd *mr_free_pd;
};
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
struct hns_roce_bt_table bt_table;
struct hns_roce_tptr_table tptr_table;
struct hns_roce_des_qp des_qp;
struct hns_roce_free_mr free_mr;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
#endif
......@@ -35,51 +35,12 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_user.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
-/**
-* hns_roce_addrconf_ifid_eui48 - Get default gid.
-* @eui: eui.
-* @vlan_id: gid
-* @dev: net device
-* Description:
-* MAC convert to GID
-* gid[0..7] = fe80 0000 0000 0000
-* gid[8] = mac[0] ^ 2
-* gid[9] = mac[1]
-* gid[10] = mac[2]
-* gid[11] = ff (VLAN ID high byte (4 MS bits))
-* gid[12] = fe (VLAN ID low byte)
-* gid[13] = mac[3]
-* gid[14] = mac[4]
-* gid[15] = mac[5]
-*/
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
-struct net_device *dev)
-{
-memcpy(eui, dev->dev_addr, 3);
-memcpy(eui + 5, dev->dev_addr + 3, 3);
-if (vlan_id < 0x1000) {
-eui[3] = vlan_id >> 8;
-eui[4] = vlan_id & 0xff;
-} else {
-eui[3] = 0xff;
-eui[4] = 0xfe;
-}
-eui[0] ^= 2;
-}
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
-memset(gid, 0, sizeof(*gid));
-gid->raw[0] = 0xFE;
-gid->raw[1] = 0x80;
-hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
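/* Worked example of the mapping removed above (editorial), assuming MAC
 * 00:1b:21:0a:0b:0c and no VLAN (vlan_id = 0xffff):
 *   eui = 02:1b:21:ff:fe:0a:0b:0c   (mac[0] ^ 2, then the ff:fe filler)
 *   gid = fe80:0000:0000:0000:021b:21ff:fe0a:0b0c
 * i.e. the standard EUI-64 link-local construction, which the RoCE core
 * now derives on the driver's behalf.
 */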
/**
* hns_get_gid_index - Get gid index.
* @hr_dev: pointer to structure hns_roce_dev.
......@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
return gid_index * hr_dev->caps.num_ports + port;
}
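/* Quick illustration (editorial, hypothetical 2-port device): GIDs are
 * stored interleaved by port, slot = gid_index * num_ports + port:
 *   (port 0, index 0) -> 0   (port 1, index 0) -> 1
 *   (port 0, index 3) -> 6   (port 1, index 3) -> 7
 */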
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
-union ib_gid *gid)
-{
-struct device *dev = &hr_dev->pdev->dev;
-u8 gid_idx = 0;
-if (gid_index >= hr_dev->caps.gid_table_len[port]) {
-dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
-gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
-return -EINVAL;
-}
-gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
-return -EINVAL;
-memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-return 0;
-}
static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
......@@ -135,27 +72,44 @@ static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+unsigned int index, const union ib_gid *gid,
+const struct ib_gid_attr *attr, void **context)
{
-u8 phy_port = hr_dev->iboe.phy_port[port];
-enum ib_mtu tmp;
+struct hns_roce_dev *hr_dev = to_hr_dev(device);
+u8 port = port_num - 1;
+unsigned long flags;
+if (port >= hr_dev->caps.num_ports)
+return -EINVAL;
+spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-tmp = iboe_get_mtu(mtu);
-if (!tmp)
-tmp = IB_MTU_256;
+hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
-hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+return 0;
}
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+unsigned int index, void **context)
{
-struct ib_event event;
+struct hns_roce_dev *hr_dev = to_hr_dev(device);
+union ib_gid zgid = { {0} };
+u8 port = port_num - 1;
+unsigned long flags;
+if (port >= hr_dev->caps.num_ports)
+return -EINVAL;
+spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-/* Refresh gid in ib_cache */
-event.device = &hr_dev->ib_dev;
-event.element.port_num = port + 1;
-event.event = IB_EVENT_GID_CHANGE;
-ib_dispatch_event(&event);
+hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+return 0;
}
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
......@@ -163,9 +117,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
{
struct device *dev = &hr_dev->pdev->dev;
struct net_device *netdev;
-unsigned long flags;
-union ib_gid gid;
-int ret = 0;
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
......@@ -173,7 +124,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
-spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+spin_lock_bh(&hr_dev->iboe.lock);
switch (event) {
case NETDEV_UP:
......@@ -181,23 +132,19 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
-hns_roce_make_default_gid(netdev, &gid);
-ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
-if (!ret)
-hns_roce_update_gids(hr_dev, port);
break;
case NETDEV_DOWN:
/*
-* In v1 engine, only support all ports closed together.
-*/
+* In v1 engine, only support all ports closed together.
+*/
break;
default:
dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
break;
}
-spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-return ret;
+spin_unlock_bh(&hr_dev->iboe.lock);
+return 0;
}
static int hns_roce_netdev_event(struct notifier_block *self,
......@@ -224,118 +171,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
return NOTIFY_DONE;
}
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
-struct hns_roce_dev *hr_dev, union ib_gid *gid)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
-struct hns_roce_ib_iboe *iboe = NULL;
-int gid_table_len = 0;
-unsigned long flags;
-union ib_gid zgid;
-u8 gid_idx = 0;
-u8 port = 0;
-int i = 0;
-int free;
-struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
-rdma_vlan_dev_real_dev(event_netdev) :
-event_netdev;
-if (event != NETDEV_UP && event != NETDEV_DOWN)
-return;
-iboe = &hr_dev->iboe;
-while (port < hr_dev->caps.num_ports) {
-if (real_dev == iboe->netdevs[port])
-break;
-port++;
-}
-if (port >= hr_dev->caps.num_ports) {
-dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
-return;
-}
-memset(zgid.raw, 0, sizeof(zgid.raw));
-free = -1;
-gid_table_len = hr_dev->caps.gid_table_len[port];
-spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-for (i = 0; i < gid_table_len; i++) {
-gid_idx = hns_get_gid_index(hr_dev, port, i);
-if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
-sizeof(gid->raw)))
-break;
-if (free < 0 && !memcmp(zgid.raw,
-iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
-free = i;
-}
-if (i >= gid_table_len) {
-if (free < 0) {
-spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-dev_dbg(&hr_dev->pdev->dev,
-"gid_index overflow, port(%d)\n", port);
-return;
-}
-if (!hns_roce_set_gid(hr_dev, port, free, gid))
-hns_roce_update_gids(hr_dev, port);
-} else if (event == NETDEV_DOWN) {
-if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
-hns_roce_update_gids(hr_dev, port);
-}
-spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
-void *ptr)
-{
-struct in_ifaddr *ifa = ptr;
-struct hns_roce_dev *hr_dev;
-struct net_device *dev = ifa->ifa_dev->dev;
-union ib_gid gid;
-ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-hns_roce_addr_event(event, dev, hr_dev, &gid);
-return NOTIFY_DONE;
-}
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
-{
-struct in_ifaddr *ifa_list = NULL;
-union ib_gid gid = {{0} };
-u32 ipaddr = 0;
-int index = 0;
-int ret = 0;
-u8 i = 0;
+u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
-hns_roce_set_mtu(hr_dev, i,
-ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+hr_dev->caps.max_mtu);
hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-if (hr_dev->iboe.netdevs[i]->ip_ptr) {
-ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
-index = 1;
-while (ifa_list) {
-ipaddr = ifa_list->ifa_address;
-ipv6_addr_set_v4mapped(ipaddr,
-(struct in6_addr *)&gid);
-ret = hns_roce_set_gid(hr_dev, i, index, &gid);
-if (ret)
-break;
-index++;
-ifa_list = ifa_list->ifa_next;
-}
-hns_roce_update_gids(hr_dev, i);
-}
}
-return ret;
+return 0;
}
static int hns_roce_query_device(struct ib_device *ib_dev,
......@@ -444,31 +290,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
union ib_gid *gid)
{
-struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-struct device *dev = &hr_dev->pdev->dev;
-u8 gid_idx = 0;
-u8 port;
-if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
-index >= hr_dev->caps.gid_table_len[port_num - 1]) {
-dev_err(dev,
-"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
-port_num, index, hr_dev->caps.num_ports,
-hr_dev->caps.gid_table_len[port_num - 1] - 1);
-return -EINVAL;
-}
-port = port_num - 1;
-gid_idx = hns_get_gid_index(hr_dev, port, index);
-if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
-dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
-port_num, index, HNS_ROCE_MAX_GID_NUM);
-return -EINVAL;
-}
-memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
-HNS_ROCE_GID_SIZE);
return 0;
}
......@@ -549,6 +370,8 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
+struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL;
......@@ -558,10 +381,15 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
-} else {
+} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+/* vm_pgoff: 1 -- TPTR */
+if (io_remap_pfn_range(vma, vma->vm_start,
+hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+hr_dev->tptr_size,
+vma->vm_page_prot))
+return -EAGAIN;
+} else
return -EINVAL;
-}
return 0;
}
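/* Hypothetical user-space counterpart (editorial, not part of this
 * commit): mmap's file offset is vm_pgoff * page size, so a provider
 * library would map the UAR at offset 0 and, on hw v1, the TPTR page
 * at offset 1 * page size:
 *
 *	uar  = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, cmd_fd, 0);
 *	tptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, cmd_fd, page_size);
 */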
......@@ -605,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
......@@ -639,6 +467,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
+ib_dev->add_gid = hns_roce_add_gid;
+ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
......@@ -681,32 +511,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
return ret;
}
-ret = hns_roce_setup_mtu_gids(hr_dev);
+ret = hns_roce_setup_mtu_mac(hr_dev);
if (ret) {
-dev_err(dev, "roce_setup_mtu_gids failed!\n");
-goto error_failed_setup_mtu_gids;
+dev_err(dev, "setup_mtu_mac failed!\n");
+goto error_failed_setup_mtu_mac;
}
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
dev_err(dev, "register_netdevice_notifier failed!\n");
-goto error_failed_setup_mtu_gids;
-}
-iboe->nb_inet.notifier_call = hns_roce_inet_event;
-ret = register_inetaddr_notifier(&iboe->nb_inet);
-if (ret) {
-dev_err(dev, "register inet addr notifier failed!\n");
-goto error_failed_register_inetaddr_notifier;
+goto error_failed_setup_mtu_mac;
}
return 0;
-error_failed_register_inetaddr_notifier:
-unregister_netdevice_notifier(&iboe->nb);
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
ib_unregister_device(ib_dev);
return ret;
......@@ -940,10 +760,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
/**
-* hns_roce_setup_hca - setup host channel adapter
-* @hr_dev: pointer to hns roce device
-* Return : int
-*/
+* hns_roce_setup_hca - setup host channel adapter
+* @hr_dev: pointer to hns roce device
+* Return : int
+*/
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
......@@ -1008,11 +828,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
}
/**
-* hns_roce_probe - RoCE driver entrance
-* @pdev: pointer to platform device
-* Return : int
-*
-*/
+* hns_roce_probe - RoCE driver entrance
+* @pdev: pointer to platform device
+* Return : int
+*
+*/
static int hns_roce_probe(struct platform_device *pdev)
{
int ret;
......@@ -1023,9 +843,6 @@ static int hns_roce_probe(struct platform_device *pdev)
if (!hr_dev)
return -ENOMEM;
-memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
-sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
hr_dev->pdev = pdev;
platform_set_drvdata(pdev, hr_dev);
......@@ -1125,9 +942,9 @@ static int hns_roce_probe(struct platform_device *pdev)
}
/**
-* hns_roce_remove - remove RoCE device
-* @pdev: pointer to platform device
-*/
+* hns_roce_remove - remove RoCE device
+* @pdev: pointer to platform device
+*/
static int hns_roce_remove(struct platform_device *pdev)
{
struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
......
......@@ -42,7 +42,7 @@ static u32 hw_index_to_key(unsigned long ind)
return (u32)(ind >> 24) | (ind << 8);
}
-static unsigned long key_to_hw_index(u32 key)
+unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
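/* Editorial note: the two helpers are inverse 32-bit byte rotations --
 * hw_index_to_key() rotates left by 8, key_to_hw_index() rotates right
 * by 8 -- so key_to_hw_index(hw_index_to_key(ind)) == ind.  Example:
 *   ind 0x00000012 -> key 0x00001200 -> ind 0x00000012
 */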
......@@ -53,16 +53,16 @@ static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
HNS_ROCE_CMD_SW2HW_MPT,
-HNS_ROCE_CMD_TIME_CLASS_B);
+HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
-HNS_ROCE_CMD_TIME_CLASS_B);
+HNS_ROCE_CMD_TIMEOUT_MSECS);
}
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
......@@ -137,11 +137,13 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
-if (!buddy->bits[i])
-goto err_out_free;
-bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
+__GFP_NOWARN);
+if (!buddy->bits[i]) {
+buddy->bits[i] = vzalloc(s * sizeof(long));
+if (!buddy->bits[i])
+goto err_out_free;
+}
}
set_bit(0, buddy->bits[buddy->max_order]);
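/* The allocation strategy above, in isolation (editorial sketch): try
 * the slab allocator quietly first, fall back to vmalloc for large
 * tables, and free with kvfree(), which accepts either origin.
 *
 *	tbl = kcalloc(n, sizeof(long), GFP_KERNEL | __GFP_NOWARN);
 *	if (!tbl)
 *		tbl = vzalloc(n * sizeof(long));
 *	// ... use tbl ...
 *	kvfree(tbl);
 */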
......@@ -151,7 +153,7 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
-kfree(buddy->bits[i]);
+kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
......@@ -164,7 +166,7 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
-kfree(buddy->bits[i]);
+kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
......@@ -287,7 +289,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
-key_to_hw_index(mr->key));
+key_to_hw_index(mr->key), BITMAP_NO_RR);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
......@@ -605,13 +607,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
+struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+int ret = 0;
-hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
-if (mr->umem)
-ib_umem_release(mr->umem);
+if (hr_dev->hw->dereg_mr) {
+ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+} else {
+hns_roce_mr_free(hr_dev, mr);
-kfree(mr);
+if (mr->umem)
+ib_umem_release(mr->umem);
-return 0;
+kfree(mr);
+}
+return ret;
}
......@@ -40,7 +40,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
-hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
......@@ -121,7 +121,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
-hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+BITMAP_NO_RR);
}
int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
......
......@@ -37,7 +37,7 @@
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include <rdma/hns-abi.h>
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
......@@ -250,7 +250,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
if (base_qpn < SQP_NUM)
return;
-hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
......
......@@ -112,9 +112,12 @@
#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define IW_CFG_FPM_QP_COUNT 32768
-#define I40IW_MAX_PAGES_PER_FMR 512
-#define I40IW_MIN_PAGES_PER_FMR 1
+#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
+#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
+#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
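/* Editorial sanity check of the two defines above: with the common
 * 1500-byte Ethernet MTU, 1500 - 40 (20 B IPv4 + 20 B TCP headers)
 * = 1460 = I40IW_DEFAULT_MSS.
 */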
......@@ -210,6 +213,12 @@ struct i40iw_msix_vector {
u32 ceq_id;
};
struct l2params_work {
struct work_struct work;
struct i40iw_device *iwdev;
struct i40iw_l2params l2params;
};
#define I40IW_MSIX_TABLE_SIZE 65
struct virtchnl_work {
......@@ -227,6 +236,7 @@ struct i40iw_device {
struct net_device *netdev;
wait_queue_head_t vchnl_waitq;
struct i40iw_sc_dev sc_dev;
struct i40iw_sc_vsi vsi;
struct i40iw_handler *hdl;
struct i40e_info *ldev;
struct i40e_client *client;
......@@ -280,7 +290,6 @@ struct i40iw_device {
u32 sd_type;
struct workqueue_struct *param_wq;
atomic_t params_busy;
-u32 mss;
enum init_completion_state init_state;
u16 mac_ip_table_idx;
atomic_t vchnl_msgs;
......@@ -297,6 +306,14 @@ struct i40iw_device {
u32 mr_stagmask;
u32 mpa_version;
bool dcb;
bool closing;
bool reset;
u32 used_pds;
u32 used_cqs;
u32 used_mrs;
u32 used_qps;
wait_queue_head_t close_wq;
atomic64_t use_count;
};
struct i40iw_ib_device {
......@@ -498,7 +515,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
int i40iw_register_rdma_device(struct i40iw_device *iwdev);
void i40iw_port_ibevent(struct i40iw_device *iwdev);
-int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn(struct i40iw_qp *iwqp);
void i40iw_cm_disconn_worker(void *);
int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
struct sk_buff *);
......@@ -508,20 +525,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
u8 *mac_addr, u8 *mac_index);
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
void i40iw_add_devusecount(struct i40iw_device *iwdev);
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
struct i40iw_modify_qp_info *info, bool wait);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
struct i40iw_sc_qp *qp,
bool suspend);
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
struct i40iw_cm_info *cminfo,
enum i40iw_quad_entry_type etype,
enum i40iw_quad_hash_manage_type mtype,
void *cmnode,
bool wait);
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num);
......
......@@ -56,8 +56,6 @@
#define I40IW_MAX_IETF_SIZE 32
-#define MPA_ZERO_PAD_LEN 4
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
......@@ -299,6 +297,7 @@ struct i40iw_cm_listener {
enum i40iw_cm_listener_state listener_state;
u32 reused_node;
u8 user_pri;
u8 tos;
u16 vlan_id;
bool qhash_set;
bool ipv4;
......@@ -341,9 +340,11 @@ struct i40iw_cm_node {
int accept_pend;
struct list_head timer_entry;
struct list_head reset_entry;
struct list_head connected_entry;
atomic_t passive_state;
bool qhash_set;
u8 user_pri;
u8 tos;
bool ipv4;
bool snd_mark_en;
u16 lsmm_size;
......@@ -368,7 +369,8 @@ struct i40iw_cm_info {
u32 rem_addr[4];
u16 vlan_id;
int backlog;
-u16 user_pri;
+u8 user_pri;
+u8 tos;
bool ipv4;
};
......@@ -445,4 +447,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
u8 *mac_addr,
u32 action);
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
#endif /* I40IW_CM_H */
......@@ -35,6 +35,8 @@
#ifndef I40IW_D_H
#define I40IW_D_H
#define I40IW_FIRST_USER_QP_ID 2
#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
......@@ -67,6 +69,9 @@
#define I40IW_STAG_TYPE_NONSHARED 1
#define I40IW_MAX_USER_PRIORITY 8
#define I40IW_MAX_STATS_COUNT 16
#define I40IW_FIRST_NON_PF_STAT 4
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
......@@ -74,6 +79,8 @@
#define RS_32_1(val, bits) (u32)(val >> bits)
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define QS_HANDLE_UNKNOWN 0xffff
#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
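/* Editorial sketch of the field helpers above, using a made-up field:
 *	#define DEMO_SHIFT 32
 *	#define DEMO_MASK  (0x1ffffULL << DEMO_SHIFT)
 *	u64 qw = LS_64(0x1234, DEMO);	// 0x0000123400000000
 *	u64 v  = RS_64(qw, DEMO);	// 0x1234 again
 */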
......@@ -1199,8 +1206,11 @@
#define I40IWQPC_RXCQNUM_SHIFT 32
#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
-#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+#define I40IWQPC_STAT_INDEX_SHIFT 0
+#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
+#define I40IWQPC_Q2ADDR_SHIFT 0
+#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
#define I40IWQPC_LASTBYTESENT_SHIFT 0
#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
......@@ -1232,11 +1242,8 @@
#define I40IWQPC_PRIVEN_SHIFT 25
#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
-#define I40IWQPC_LSMMPRESENT_SHIFT 26
-#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
-#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
-#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
+#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
#define I40IWQPC_IWARPMODE_SHIFT 28
#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
......@@ -1713,6 +1720,8 @@ enum i40iw_alignment {
#define OP_MANAGE_VF_PBLE_BP 28
#define OP_QUERY_FPM_VALUES 29
#define OP_COMMIT_FPM_VALUES 30
-#define OP_SIZE_CQP_STAT_ARRAY 31
+#define OP_REQUESTED_COMMANDS 31
+#define OP_COMPLETED_COMMANDS 32
+#define OP_SIZE_CQP_STAT_ARRAY 33
#endif
......@@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
void *i40iw_remove_head(struct list_head *list);
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
......@@ -207,9 +209,9 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
struct i40iw_manage_vf_pble_info *info,
bool wait);
-struct i40iw_dev_pestat;
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+struct i40iw_sc_vsi;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
#define i40iw_mmiowb() mmiowb()
void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
......
......@@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
struct i40iw_device_init_info *info);
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
......@@ -64,7 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
u32 *vf_cnt_array);
-/* cqp misc functions */
+/* stats functions */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
enum i40iw_hw_stats_index_32b index,
u64 *value);
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
enum i40iw_hw_stats_index_64b index,
u64 *value);
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
/* vsi misc functions */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
......
......@@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
-if (!pages) {
-ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
-goto error;
-}
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
......
......@@ -72,12 +72,12 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
-I40IW_MAX_IRD_SIZE = 32,
-I40IW_QPCTX_ENCD_MAXIRD = 3,
+I40IW_MAX_IRD_SIZE = 63,
+I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
-I40IW_MAX_ORD_SIZE = 32,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
-I40IW_QP_CTX_SIZE = 248
+I40IW_QP_CTX_SIZE = 248,
+I40IW_MAX_PDS = 32768
};
#define i40iw_handle void *
......@@ -96,12 +96,6 @@ enum i40iw_device_capabilities_const {
#define i40iw_physical_fragment u64
#define i40iw_address_list u64 *
-#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key))
-#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) && 0x000000FF)
-#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) && 0xFFFFFF00) >> 8)
#define I40IW_MAX_MR_SIZE 0x10000000000L
struct i40iw_qp_uk;
......
......@@ -92,6 +92,8 @@ struct i40iw_mr {
struct ib_umem *region;
u16 type;
u32 page_cnt;
u32 page_size;
u64 page_msk;
u32 npages;
u32 stag;
u64 length;
......
......@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
rec = kzalloc(sizeof *rec, GFP_KERNEL);
-if (!rec) {
-pr_err("alias_guid_work: No Memory\n");
+if (!rec)
return;
-}
pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
......
[4 file diffs are collapsed in this view and not shown.]
......@@ -203,8 +203,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
-mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
-(unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
......
[5 file diffs are collapsed in this view and not shown.]