Commit 8db573ba authored by Ariel Elior, committed by David S. Miller

bnx2x: PF driver support for the VF setup_q request

Upon receiving a 'setup_q' request from the VF over the VF <-> PF
channel, the PF driver opens a corresponding queue in the device.
The PF driver configures the queue with the MAC address, VLAN
configuration, etc. supplied by the VF.
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 67c431a5
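
For orientation, the VF side of this exchange is not part of this patch, but a rough sketch of the request it sends helps in reading the handler below. The helper name here is hypothetical and the mailbox plumbing (TLV chain construction, the actual send over the channel) is elided; only field names that the new PF handler actually reads are used.

/* Hypothetical VF-side sketch: populate the setup_q request that
 * bnx2x_vf_mbx_setup_q() below consumes. How 'req' is obtained and
 * sent over the VF <-> PF channel is elided.
 */
static void vf_fill_setup_q_example(struct vfpf_setup_q_tlv *req,
				    dma_addr_t rxq_dma, dma_addr_t rcq_dma,
				    dma_addr_t txq_dma)
{
	req->vf_qid = 0;		/* first VF queue */
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* rx side: ring addresses and sizing, copied verbatim by the PF */
	req->rxq.vf_sb = 0;		/* status block index */
	req->rxq.rxq_addr = rxq_dma;
	req->rxq.rcq_addr = rcq_dma;
	req->rxq.mtu = 1500;
	req->rxq.flags = VFPF_QUEUE_FLG_STATS | VFPF_QUEUE_FLG_VLAN;

	/* tx side: same SB index - the handler notes that split SBs
	 * for rx and tx are not supported
	 */
	req->txq.vf_sb = 0;
	req->txq.txq_addr = txq_dma;
	req->txq.flags = VFPF_QUEUE_FLG_STATS;
}
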
@@ -969,6 +969,7 @@ extern struct workqueue_struct *bnx2x_wq;
 #define BNX2X_MAX_NUM_OF_VFS 64
 #define BNX2X_VF_CID_WND 0
 #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
+#define BNX2X_CLIENTS_PER_VF 1
 #define BNX2X_FIRST_VF_CID 256
 #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
 #define BNX2X_VF_ID_INVALID 0xFF
......
@@ -2029,7 +2029,7 @@ static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
-	int num_groups;
+	int num_groups, vf_headroom = 0;
 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

 	/* number of queues for statistics is number of eth queues + FCoE */
@@ -2042,18 +2042,26 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 	 */
 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

+	/* vf stats appear in the request list, but their data is allocated by
+	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
+	 * it is used to determine where to place the vf stats queries in the
+	 * request struct
+	 */
+	if (IS_SRIOV(bp))
+		vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+
 	/* Request is built from stats_query_header and an array of
 	 * stats_query_cmd_group each of which contains
 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
 	 * configured in the stats_query_header.
 	 */
 	num_groups =
-		(((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
-		(((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ?
+		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+		(((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
 		1 : 0));

-	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, num_groups %d\n",
-	   bp->fw_stats_num, num_groups);
+	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+	   bp->fw_stats_num, vf_headroom, num_groups);

 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 			num_groups * sizeof(struct stats_query_cmd_group);
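
To make the headroom arithmetic concrete, the following standalone sketch reproduces the group computation. STATS_QUERY_CMD_COUNT is assumed here to be 16 (its real value comes from the firmware HSI headers) and the sample counts are hypothetical.

/* Standalone sketch of the num_groups computation above. The value of
 * STATS_QUERY_CMD_COUNT (16) is an assumption taken from the firmware
 * HSI; the sample counts are made up.
 */
#include <stdio.h>

#define STATS_QUERY_CMD_COUNT 16

int main(void)
{
	int fw_stats_num = 10;	/* e.g. 2 (port + pf) + FCoE + 7 eth queues */
	int vf_headroom = 64;	/* 64 VFs x BNX2X_CLIENTS_PER_VF (1) */

	/* ceiling division: queries are packed 16 per command group */
	int num_groups =
		((fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		(((fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("num_groups = %d\n", num_groups);	/* 74 queries -> 5 groups */
	return 0;
}
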
......
@@ -26,6 +26,8 @@
  * The VF array is indexed by the relative vfid.
  */
 #define BNX2X_VF_MAX_QUEUES		16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES	8
+
 struct bnx2x_sriov {
 	u32 first_vf_in_pf;
@@ -91,6 +93,11 @@ struct bnx2x_virtf;
 /* VFOP definitions */
 typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);

+struct bnx2x_vfop_cmd {
+	vfop_handler_t done;
+	bool block;
+};
+
 /* VFOP queue filters command additional arguments */
 struct bnx2x_vfop_filter {
 	struct list_head link;
@@ -405,6 +412,11 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 	return vf->igu_base_id + q->index;
 }

+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+	return vfq_cl_id(vf, q);
+}
+
 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 {
 	return vfq_cl_id(vf, q);
@@ -435,6 +447,45 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* init */
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
/* VFOP generic helpers */
#define bnx2x_vfop_default(state) do { \
BNX2X_ERR("Bad state %d\n", (state)); \
vfop->rc = -EINVAL; \
goto op_err; \
} while (0)
enum {
VFOP_DONE,
VFOP_CONT,
VFOP_VERIFY_PEND,
};
#define bnx2x_vfop_finalize(vf, rc, next) do { \
if ((rc) < 0) \
goto op_err; \
else if ((rc) > 0) \
goto op_pending; \
else if ((next) == VFOP_DONE) \
goto op_done; \
else if ((next) == VFOP_VERIFY_PEND) \
BNX2X_ERR("expected pending\n"); \
else { \
DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \
atomic_set(&vf->op_in_progress, 1); \
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
return; \
} \
} while (0)
#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
do { \
vfop->state = first_state; \
vfop->op_p = &vf->op_params; \
vfop->transition = trans_hndlr; \
vfop->done = done_hndlr; \
} while (0)
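
To see how these macros compose, here is a hypothetical transition handler; the state names, the handler, and its single ramrod-less step are illustrative only, and it leans on bnx2x_vfop_cur() and bnx2x_vfop_end(), which are defined further down in this header.

/* Hypothetical transition handler showing the macro contract:
 * bnx2x_vfop_finalize() routes on rc (<0 error, >0 pending) and on the
 * requested 'next' step; bnx2x_vfop_default() catches bad states.
 */
enum bnx2x_vfop_example_state {
	BNX2X_VFOP_EXAMPLE_START,
	BNX2X_VFOP_EXAMPLE_DONE,
};

static void bnx2x_vfop_example(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_example_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_EXAMPLE_START:
		/* no ramrod posted here, so ask to be rescheduled */
		vfop->state = BNX2X_VFOP_EXAMPLE_DONE;
		vfop->rc = 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_EXAMPLE_DONE:
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("example op failed, rc %d\n", vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
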
static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
@@ -443,6 +494,132 @@ static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}
static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
WARN(!mutex_is_locked(&vf->op_mutex),
     "about to access vf op linked list but mutex was not locked!");
if (vfop) {
INIT_LIST_HEAD(&vfop->link);
list_add(&vfop->link, &vf->op_list_head);
}
return vfop;
}
static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vfop *vfop)
{
/* rc < 0 - error, otherwise set to 0 */
DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
if (vfop->rc >= 0)
vfop->rc = 0;
DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
/* unlink the current op context and propagate error code
* must be done before invoking the 'done()' handler
*/
WARN(!mutex_is_locked(&vf->op_mutex),
"about to access vf op linked list but mutex was not locked!");
list_del(&vfop->link);
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
vf->op_rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
} else {
struct bnx2x_vfop *cur_vfop;
DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
cur_vfop = bnx2x_vfop_cur(bp, vf);
cur_vfop->rc = vfop->rc;
DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
}
/* invoke done handler */
if (vfop->done) {
DP(BNX2X_MSG_IOV, "calling done handler\n");
vfop->done(bp, vf);
}
DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
vf->op_rc, vfop->rc);
/* if this is the last nested op reset the wait_blocking flag
* to release any blocking wrappers, only after 'done()' is invoked
*/
if (list_empty(&vf->op_list_head)) {
DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
vf->op_wait_blocking = false;
}
kfree(vfop);
}
static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
/* can take a while if any port is running */
int cnt = 5000;
might_sleep();
while (cnt--) {
if (vf->op_wait_blocking == false) {
#ifdef BNX2X_STOP_ON_ERROR
DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
#endif
return 0;
}
usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
}
/* timeout! */
#ifdef BNX2X_STOP_ON_ERROR
bnx2x_panic();
#endif
return -EBUSY;
}
static inline int bnx2x_vfop_transition(struct bnx2x *bp,
struct bnx2x_virtf *vf,
vfop_handler_t transition,
bool block)
{
if (block)
vf->op_wait_blocking = true;
transition(bp, vf);
if (block)
return bnx2x_vfop_wait_blocking(bp, vf);
return 0;
}
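
A command wrapper built on top of bnx2x_vfop_add(), bnx2x_vfop_opset() and bnx2x_vfop_transition() would then follow this pattern; the wrapper below is hypothetical (modelled on what the bnx2x_vfop_qsetup_cmd() prototype further down presumably does) and reuses the sketch handler from above.

/* Hypothetical *_cmd wrapper: allocate an op context, program its
 * first state and handlers, then kick the state machine. cmd->block
 * selects between sleeping until completion and returning at once,
 * in which case cmd->done() fires when the op chain ends.
 */
static int bnx2x_vfop_example_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (!vfop)
		return -ENOMEM;

	bnx2x_vfop_opset(BNX2X_VFOP_EXAMPLE_START,
			 bnx2x_vfop_example, cmd->done);
	return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_example,
				     cmd->block);
}

The mailbox handler in this patch uses the same shape: bnx2x_vfop_qsetup_cmd() is invoked with block = false and bnx2x_vf_mbx_resp as the done handler, so the PF's reply to the VF is sent only once the queue constructor completes.
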
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
u16 q_idx, u16 sb_idx);
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
u16 q_idx, u16 sb_idx);
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
struct bnx2x_vfop_qctor_params *p,
unsigned long q_type);
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
/* VF FLR helpers */
......
@@ -370,6 +370,149 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_mbx_resp(bp, vf);
}
/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
unsigned long *sp_q_flags)
{
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
vf->op_rc = -EINVAL;
goto response;
}
/* tx queues must be set up alongside rx queues - thus if the rx queue
 * is not marked as valid there's nothing to do.
 */
if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
unsigned long q_type = 0;
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
/* reinit the VF operation context */
memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
init_p = &vf->op_params.qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
if (setup_q->param_valid & VFPF_TXQ_VALID) {
struct bnx2x_txq_setup_params *txq_params =
&setup_p->txq_params;
__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
/* save sb resource index */
q->sb_idx = setup_q->txq.vf_sb;
/* tx init */
init_p->tx.hc_rate = setup_q->txq.hc_rate;
init_p->tx.sb_cq_index = setup_q->txq.sb_index;
bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
&init_p->tx.flags);
/* tx setup - flags */
bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
&setup_p->flags);
/* tx setup - general, nothing */
/* tx setup - tx */
txq_params->dscr_map = setup_q->txq.txq_addr;
txq_params->sb_cq_index = setup_q->txq.sb_index;
txq_params->traffic_type = setup_q->txq.traffic_type;
bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
if (setup_q->param_valid & VFPF_RXQ_VALID) {
struct bnx2x_rxq_setup_params *rxq_params =
&setup_p->rxq_params;
__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
/* Note: there is no support for different SBs
* for TX and RX
*/
q->sb_idx = setup_q->rxq.vf_sb;
/* rx init */
init_p->rx.hc_rate = setup_q->rxq.hc_rate;
init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
&init_p->rx.flags);
/* rx setup - flags */
bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
&setup_p->flags);
/* rx setup - general */
setup_p->gen_params.mtu = setup_q->rxq.mtu;
/* rx setup - rx */
rxq_params->drop_flags = setup_q->rxq.drop_flags;
rxq_params->dscr_map = setup_q->rxq.rxq_addr;
rxq_params->sge_map = setup_q->rxq.sge_addr;
rxq_params->rcq_map = setup_q->rxq.rcq_addr;
rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
rxq_params->buf_sz = setup_q->rxq.buf_sz;
rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
rxq_params->cache_line_log =
setup_q->rxq.cache_line_log;
rxq_params->sb_cq_index = setup_q->rxq.sb_index;
bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
q->index, q->sb_idx);
}
/* complete the preparations */
bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
if (vf->op_rc)
goto response;
return;
}
response:
bnx2x_vf_mbx_resp(bp, vf);
}
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -391,6 +534,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		case CHANNEL_TLV_INIT:
 			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
 			break;
+		case CHANNEL_TLV_SETUP_Q:
+			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+			break;
 		}
 	} else {
 		/* unknown TLV - this may belong to a VF driver from the future
......