Commit 8e5aa617 authored by David S. Miller

Merge branch 'qed-Utilize-FW-8.42.2.0'

Michal Kalderon says:

====================
qed*: Utilize FW 8.42.2.0

This FW contains several fixes and features, main ones listed below.
We have taken into consideration past comments on previous FW versions
that were uploaded and tried to separate this one to smaller patches to
ease review.

- RoCE
	- SRIOV support
	- Fixes in following flows:
		- latency optimization flow for inline WQEs
		- iWARP OOO packed DDPs flow
		- tx-dif workaround calculations flow
		- XRC-SRQ exceed cache num

- iSCSI
	- Fixes:
		- iSCSI TCP out-of-order handling.
		- iSCSI retransmit flow

- FCoE
	- Fixes:
		- upload + cleanup flows

- Debug
	- Better handling of extracting data during traffic
	- ILT Dump -> dumping host memory used by chip
	- MDUMP -> collect debug data on system crash and extract after
	  reboot

Patches prefixed with FW 8.42.2.0 are required to work with the binary
8.42.2.0 FW, whereas the rest are FW related but do not require the
binary.

Changes from V2
---------------
- Move FW version to the start of the series to maintain minimal compatibility
- Fix some kbuild errors:
	- frame size larger than 1024 (Queue Manager patch - remove redundant
	  field from struct)
	- sparse warning on endianness (DMAE patch fix - wrong use of __le32 for a field
	  used only on host, should be u32)
	- static should be used for some functions (Debug feature ilt and mdump)
Reported-by: kbuild test robot <lkp@intel.com>

Changes from V1
---------------
- Remove epoch + kernel version from device debug dump
- don't bump driver version
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -253,7 +253,8 @@ enum qed_resources {
QED_VLAN,
QED_RDMA_CNQ_RAM,
QED_ILT,
QED_LL2_QUEUE,
QED_LL2_RAM_QUEUE,
QED_LL2_CTX_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
......@@ -461,6 +462,8 @@ struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
const u32 *fw_overlays;
u32 fw_overlays_len;
u32 init_ops_size;
};
......@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
bool valid;
};
enum qed_hsi_def_type {
QED_HSI_DEF_MAX_NUM_VFS,
QED_HSI_DEF_MAX_NUM_L2_QUEUES,
QED_HSI_DEF_MAX_NUM_PORTS,
QED_HSI_DEF_MAX_SB_PER_PATH,
QED_HSI_DEF_MAX_NUM_PFS,
QED_HSI_DEF_MAX_NUM_VPORTS,
QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
QED_HSI_DEF_MAX_QM_TX_QUEUES,
QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
QED_HSI_DEF_MAX_PBF_CMD_LINES,
QED_HSI_DEF_MAX_BTB_BLOCKS,
QED_NUM_HSI_DEFS
};
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
......@@ -646,6 +666,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
void *dbg_user_info;
struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
/* PWM region specific data */
u16 wid_count;
......@@ -668,6 +689,7 @@ struct qed_hwfn {
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
struct phys_mem_desc *fw_overlay_mem;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
......@@ -796,8 +818,8 @@ struct qed_dev {
u8 cache_shift;
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)
const u32 *iro_arr;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
......@@ -856,6 +878,8 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
......@@ -868,16 +892,35 @@ struct qed_dev {
bool iwarp_cmt;
};
#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_VFS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
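These NUM_OF_*() macros now resolve at run time through qed_get_hsi_def_val() (added later in this series, in qed_dev.c) instead of selecting a BB- or K2-specific compile-time constant. A minimal standalone sketch of that chip-indexed table lookup follows; the enum names and numbers are placeholders for illustration, not the real HSI constants.

/* Standalone sketch of the per-chip lookup behind NUM_OF_VFS() and friends.
 * The real table is qed_hsi_def_val[][MAX_CHIP_IDS] in qed_dev.c; the
 * values below are placeholders, not the actual HSI constants.
 */
enum example_chip { EX_CHIP_BB, EX_CHIP_K2, EX_MAX_CHIPS };
enum example_def { EX_DEF_MAX_NUM_VFS, EX_DEF_MAX_NUM_PORTS, EX_NUM_DEFS };

static const unsigned int example_hsi_defs[EX_NUM_DEFS][EX_MAX_CHIPS] = {
	[EX_DEF_MAX_NUM_VFS]   = { 120, 192 },	/* placeholder values */
	[EX_DEF_MAX_NUM_PORTS] = {   2,   4 },	/* placeholder values */
};

static unsigned int example_get_hsi_def(enum example_chip chip,
					enum example_def type)
{
	/* Out-of-range types fall back to 0, mirroring the defensive check
	 * in qed_get_hsi_def_val().
	 */
	return type < EX_NUM_DEFS ? example_hsi_defs[type][chip] : 0;
}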
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
......
......@@ -50,12 +50,6 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
......@@ -123,126 +117,6 @@ struct src_ent {
/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
/* PF per protocl configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
struct qed_tid_seg {
u32 count;
u8 type;
bool has_fl_mem;
};
struct qed_conn_type_cfg {
u32 cid_count;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
#define SRQ_BLK (0)
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
enum ilt_clients {
ILT_CLI_CDUC,
ILT_CLI_CDUT,
ILT_CLI_QM,
ILT_CLI_TM,
ILT_CLI_SRC,
ILT_CLI_TSDM,
ILT_CLI_MAX
};
struct ilt_cfg_pair {
u32 reg;
u32 val;
};
struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
bool active;
/* ILT boundaries */
struct ilt_cfg_pair first;
struct ilt_cfg_pair last;
struct ilt_cfg_pair p_size;
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
/* ILT client blocks for VFs */
struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
u32 vf_total_lines;
};
/* Per Path -
* ILT shadow table
* Protocol acquired CID lists
* PF start line in ILT
*/
struct qed_dma_mem {
dma_addr_t p_phys;
void *p_virt;
size_t size;
};
struct qed_cid_acquired_map {
u32 start_cid;
u32 max_count;
unsigned long *cid_map;
};
struct qed_cxt_mngr {
/* Per protocl configuration */
struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
/* Task type sizes */
u32 task_type_size[NUM_TASK_TYPES];
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
struct qed_cid_acquired_map
acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
/* Mutex for a dynamic ILT allocation */
struct mutex mutex;
/* SRC T2 */
struct qed_dma_mem *t2;
u32 t2_num_pages;
u64 first_free;
u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
......@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
u32 i;
if (!p_mngr->t2)
if (!p_t2 || !p_t2->dma_mem)
return;
for (i = 0; i < p_mngr->t2_num_pages; i++)
if (p_mngr->t2[i].p_virt)
for (i = 0; i < p_t2->num_pages; i++)
if (p_t2->dma_mem[i].virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_mngr->t2[i].size,
p_mngr->t2[i].p_virt,
p_mngr->t2[i].p_phys);
p_t2->dma_mem[i].size,
p_t2->dma_mem[i].virt_addr,
p_t2->dma_mem[i].phys_addr);
kfree(p_t2->dma_mem);
p_t2->dma_mem = NULL;
}
static int
qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
{
void **p_virt;
u32 size, i;
if (!p_t2 || !p_t2->dma_mem)
return -EINVAL;
kfree(p_mngr->t2);
p_mngr->t2 = NULL;
for (i = 0; i < p_t2->num_pages; i++) {
size = min_t(u32, total_size, page_size);
p_virt = &p_t2->dma_mem[i].virt_addr;
*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
size,
&p_t2->dma_mem[i].phys_addr,
GFP_KERNEL);
if (!p_t2->dma_mem[i].virt_addr)
return -ENOMEM;
memset(*p_virt, 0, size);
p_t2->dma_mem[i].size = size;
total_size -= size;
}
return 0;
}
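A quick worked example of the splitting arithmetic used above (DIV_ROUND_UP for the page count, then clamping so the last page only covers the remainder); plain userspace C with placeholder sizes, shown only to make the loop's behaviour concrete.

/* Worked example: total_size = 10000 bytes with a 4096-byte page size
 * yields three pages of 4096, 4096 and 1808 bytes.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int total = 10000, psz = 4096;
	unsigned int pages = DIV_ROUND_UP(total, psz);
	unsigned int i;

	for (i = 0; i < pages; i++) {
		unsigned int size = total < psz ? total : psz;

		printf("page %u: %u bytes\n", i, size);
		total -= size;
	}
	return 0;
}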
static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_num, total_size, ent_per_page, psz, i;
struct phys_mem_desc *p_t2_last_page;
struct qed_ilt_client_cfg *p_src;
struct qed_src_iids src_iids;
struct qed_dma_mem *p_t2;
struct qed_src_t2 *p_t2;
int rc;
memset(&src_iids, 0, sizeof(src_iids));
......@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
/* use the same page size as the SRC ILT client */
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
p_t2 = &p_mngr->src_t2;
p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
/* allocate t2 */
p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->t2) {
p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!p_t2->dma_mem) {
DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
rc = -ENOMEM;
goto t2_fail;
}
/* allocate t2 pages */
for (i = 0; i < p_mngr->t2_num_pages; i++) {
u32 size = min_t(u32, total_size, psz);
void **p_virt = &p_mngr->t2[i].p_virt;
*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
&p_mngr->t2[i].p_phys,
GFP_KERNEL);
if (!p_mngr->t2[i].p_virt) {
rc = -ENOMEM;
goto t2_fail;
}
p_mngr->t2[i].size = size;
total_size -= size;
}
rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
if (rc)
goto t2_fail;
/* Set the t2 pointers */
/* entries per page - must be a power of two */
ent_per_page = psz / sizeof(struct src_ent);
p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
p_mngr->last_free = (u64) p_t2->p_phys +
p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
p_t2->last_free = (u64)p_t2_last_page->phys_addr +
((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
for (i = 0; i < p_mngr->t2_num_pages; i++) {
for (i = 0; i < p_t2->num_pages; i++) {
u32 ent_num = min_t(u32,
ent_per_page,
conn_num);
struct src_ent *entries = p_mngr->t2[i].p_virt;
u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
u32 j;
for (j = 0; j < ent_num - 1; j++) {
......@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
entries[j].next = cpu_to_be64(val);
}
if (i < p_mngr->t2_num_pages - 1)
val = (u64) p_mngr->t2[i + 1].p_phys;
if (i < p_t2->num_pages - 1)
val = (u64)p_t2->dma_mem[i + 1].phys_addr;
else
val = 0;
entries[j].next = cpu_to_be64(val);
......@@ -988,7 +882,7 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
}
#define for_each_ilt_valid_client(pos, clients) \
for (pos = 0; pos < ILT_CLI_MAX; pos++) \
for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
if (!clients[pos].active) { \
continue; \
} else \
......@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
ilt_size = qed_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
if (p_dma->p_virt)
if (p_dma->virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_dma->size, p_dma->p_virt,
p_dma->p_phys);
p_dma->p_virt = NULL;
p_dma->size, p_dma->virt_addr,
p_dma->phys_addr);
p_dma->virt_addr = NULL;
}
kfree(p_mngr->ilt_shadow);
}
......@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client,
u32 start_line_offset)
{
struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
......@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
if (!p_virt)
return -ENOMEM;
ilt_shadow[line].p_phys = p_phys;
ilt_shadow[line].p_virt = p_virt;
ilt_shadow[line].phys_addr = p_phys;
ilt_shadow[line].virt_addr = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
......@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
int rc;
size = qed_cxt_ilt_shadow_size(clients);
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
rc = -ENOMEM;
......@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
(u32)(size * sizeof(struct qed_dma_mem)));
(u32)(size * sizeof(struct phys_mem_desc)));
for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
......@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
for (i = 0; i < MAX_ILT_CLIENTS; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
/* Initialize task sizes */
p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
if (p_hwfn->cdev->p_iov_info)
if (p_hwfn->cdev->p_iov_info) {
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
p_mngr->first_vf_in_pf =
p_hwfn->cdev->p_iov_info->first_vf_in_pf;
}
/* Initialize the dynamic ILT allocation mutex */
mutex_init(&p_mngr->mutex);
......@@ -1522,7 +1421,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
......@@ -1674,7 +1572,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
struct qed_dma_mem *p_shdw;
struct phys_mem_desc *p_shdw;
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
......@@ -1699,15 +1597,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
/** p_virt could be NULL incase of dynamic
* allocation
*/
if (p_shdw[line].p_virt) {
if (p_shdw[line].virt_addr) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
(p_shdw[line].p_phys >> 12));
(p_shdw[line].phys_addr >> 12));
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
rt_offst, line, i,
(u64)(p_shdw[line].p_phys >> 12));
(u64)(p_shdw[line].phys_addr >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
......@@ -2050,10 +1948,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
if (!p_mngr->ilt_shadow[line].p_virt)
if (!p_mngr->ilt_shadow[line].virt_addr)
return -EINVAL;
p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
......@@ -2234,7 +2132,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < total_lines; i++) {
shadow_line = i + p_fl_seg->start_line -
p_hwfn->p_cxt_mngr->pf_start_line;
p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
}
p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
p_fl_seg->real_size_in_page;
......@@ -2296,7 +2194,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
goto out0;
p_ptt = qed_ptt_acquire(p_hwfn);
......@@ -2334,8 +2232,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
}
}
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
p_blk->real_size_in_page;
......@@ -2345,9 +2243,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
ilt_hw_entry = 0;
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry,
ILT_ENTRY_PHY_ADDR,
(p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
(p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
>> 12));
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
......@@ -2434,16 +2332,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
}
for (i = shadow_start_line; i < shadow_end_line; i++) {
if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
continue;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
/* compute absolute offset */
......@@ -2547,8 +2445,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
ilt_idx = tid / num_tids_per_block + p_seg->start_line -
p_mngr->pf_start_line;
*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
(tid % num_tids_per_block) * tid_size;
return 0;
}
static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
{
if (p_blk->real_size_in_page == 0)
return 0;
return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
}
u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u16 i, pages = 0;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
pages += qed_blk_calculate_pages(p_blk);
}
return pages;
}
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u16 i, pages = 0;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
pages += qed_blk_calculate_pages(p_blk);
}
return pages;
}
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u16 i, pages = 0;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
pages += qed_blk_calculate_pages(p_blk);
}
return pages;
}
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u16 pages = 0, i;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
pages += qed_blk_calculate_pages(p_blk);
}
return pages;
}
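These four helpers report how many CDUT pages each PF/VF init and working segment occupies; the new ILT dump uses such counts to size its output. A hypothetical combination is sketched below purely to make their roles concrete; example_total_pf_cdut_pages() is not part of the patch.

/* Hypothetical helper (not in the patch): total CDUT pages a PF contributes,
 * i.e. its init/FL segments plus its working segments.
 */
static u32 example_total_pf_cdut_pages(struct qed_hwfn *p_hwfn)
{
	return (u32)qed_get_cdut_num_pf_init_pages(p_hwfn) +
	       (u32)qed_get_cdut_num_pf_work_pages(p_hwfn);
}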
......@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
u32 tid, u8 ctx_type, void **task_ctx);
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
/* PF per protocl configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
struct qed_tid_seg {
u32 count;
u8 type;
bool has_fl_mem;
};
struct qed_conn_type_cfg {
u32 cid_count;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration,
* Per connection type (protocol) resources (cids, tis, vf cids etc.)
* 1 - for connection context (CDUC) and for each task context we need two
* values, for regular task context and for force load memory
*/
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
#define SRQ_BLK (0)
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
struct ilt_cfg_pair {
u32 reg;
u32 val;
};
struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
u32 dynamic_line_offset;
u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
bool active;
/* ILT boundaries */
struct ilt_cfg_pair first;
struct ilt_cfg_pair last;
struct ilt_cfg_pair p_size;
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
/* ILT client blocks for VFs */
struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
u32 vf_total_lines;
};
struct qed_cid_acquired_map {
u32 start_cid;
u32 max_count;
unsigned long *cid_map;
};
struct qed_src_t2 {
struct phys_mem_desc *dma_mem;
u32 num_pages;
u64 first_free;
u64 last_free;
};
struct qed_cxt_mngr {
/* Per protocl configuration */
struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
/* computed ILT structure */
struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
/* Task type sizes */
u32 task_type_size[NUM_TASK_TYPES];
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
u32 first_vf_in_pf;
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
struct qed_cid_acquired_map
acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
/* ILT shadow table */
struct phys_mem_desc *ilt_shadow;
u32 ilt_shadow_size;
u32 pf_start_line;
/* Mutex for a dynamic ILT allocation */
struct mutex mutex;
/* SRC T2 */
struct qed_src_t2 src_t2;
u32 t2_num_pages;
u64 first_free;
u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
u8 task_type_id;
u16 task_ctx_size;
u16 conn_ctx_size;
};
u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
#endif
......@@ -14,11 +14,13 @@ enum qed_dbg_features {
DBG_FEATURE_IGU_FIFO,
DBG_FEATURE_PROTECTION_OVERRIDE,
DBG_FEATURE_FW_ASSERTS,
DBG_FEATURE_ILT,
DBG_FEATURE_NUM
};
/* Forward Declaration */
struct qed_dev;
struct qed_hwfn;
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
......@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
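The new ILT entry points follow the same size/dump convention as the other qed_dbg_* features declared here. A hedged caller sketch is shown below; example_collect_ilt_dump() and its use of vzalloc()/pr_info() are illustrative assumptions about how a caller might drive the two functions, not code from the patch.

/* Hypothetical caller: query the worst-case dump size, allocate a buffer,
 * then collect the ILT dump. Assumes the usual two-step size/dump pattern.
 */
static int example_collect_ilt_dump(struct qed_dev *cdev)
{
	u32 num_dumped_bytes = 0;
	void *buffer;
	int size, rc;

	size = qed_dbg_ilt_size(cdev);
	if (size <= 0)
		return -EINVAL;

	buffer = vzalloc(size);
	if (!buffer)
		return -ENOMEM;

	rc = qed_dbg_ilt(cdev, buffer, &num_dumped_bytes);
	if (!rc)
		pr_info("ILT dump: %u bytes collected\n", num_dumped_bytes);

	vfree(buffer);
	return rc;
}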
......
......@@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn,
/* Filter value */
addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
params.flags = QED_DMAE_FLAG_PF_DST;
SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
params.dst_pfid = pfid;
rc = qed_dmae_host2grc(p_hwfn,
p_ptt,
......@@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
......@@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
/* all vports participate in weighted fair queueing */
for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
qm_info->qm_vport_params[i].wfq = 1;
}
/* initialize qm port params */
......@@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
struct qed_dev *cdev = p_hwfn->cdev;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
......@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
for (i = 0; i < num_ports; i++) {
struct init_qm_port_params *p_qm_port =
&p_hwfn->qm_info.qm_port_params[i];
u16 pbf_max_cmd_lines;
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
}
}
......@@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
vport = &(qm_info->qm_vport_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
qm_info->start_vport + i,
vport->vport_rl, vport->vport_wfq);
"vport idx %d, wfq %d, first_tx_pq_id [ ",
qm_info->start_vport + i, vport->wfq);
for (tc = 0; tc < NUM_OF_TCS; tc++)
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
......@@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
pq = &(qm_info->qm_pq_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
"pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
qm_info->start_pq + i,
pq->port_id,
pq->vport_id,
pq->tc_id, pq->wrr_group, pq->rl_valid);
pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
}
}
......@@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!b_rc)
return -EINVAL;
/* clear the QM_PF runtime phase leftovers from previous init */
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn, p_ptt, false);
......@@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
rc = qed_dbg_alloc_user_data(p_hwfn);
rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
if (rc)
goto alloc_err;
}
......@@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
params.vport_rl_en = qm_info->vport_rl_en;
params.global_rl_en = qm_info->vport_rl_en;
params.vport_wfq_en = qm_info->vport_wfq_en;
params.port_params = qm_info->qm_port_params;
......@@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
......@@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, i;
const u32 *fw_overlays;
u32 fw_overlays_len;
u16 ether_type;
int rc = 0, i;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
......@@ -3102,6 +3106,17 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
*/
qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
fw_overlays = cdev->fw_data->fw_overlays;
fw_overlays_len = cdev->fw_data->fw_overlays_len;
p_hwfn->fw_overlay_mem =
qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
fw_overlays_len);
if (!p_hwfn->fw_overlay_mem) {
DP_NOTICE(p_hwfn,
"Failed to allocate fw overlay memory\n");
goto load_err;
}
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
......@@ -3566,8 +3581,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id)
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
case QED_LL2_QUEUE:
return "LL2_QUEUE";
case QED_LL2_RAM_QUEUE:
return "LL2_RAM_QUEUE";
case QED_LL2_CTX_QUEUE:
return "LL2_CTX_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
......@@ -3606,18 +3623,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
return 0;
}
static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,},
{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
};
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
{
enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
if (type >= QED_NUM_HSI_DEFS) {
DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
return 0;
}
return qed_hsi_def_val[type][chip_id];
}
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
bool b_ah = QED_IS_AH(p_hwfn->cdev);
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_QUEUE:
resc_max_val = MAX_NUM_LL2_RX_QUEUES;
case QED_LL2_RAM_QUEUE:
resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
break;
case QED_LL2_CTX_QUEUE:
resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
break;
case QED_RDMA_CNQ_RAM:
/* No need for a case for QED_CMDQS_CQS since
......@@ -3626,8 +3671,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
: RDMA_NUM_STATISTIC_COUNTERS_BB;
resc_max_val =
NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
break;
case QED_BDQ:
resc_max_val = BDQ_NUM_RESOURCES;
......@@ -3660,28 +3705,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_dev *cdev = p_hwfn->cdev;
switch (res_id) {
case QED_L2_QUEUE:
*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
*p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
break;
case QED_VPORT:
*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
*p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
break;
case QED_RSS_ENG:
*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
ETH_RSS_ENGINE_NUM_BB) / num_funcs;
*p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
break;
case QED_PQ:
*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
MAX_QM_TX_QUEUES_BB) / num_funcs;
*p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
*p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
......@@ -3689,11 +3730,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
PXP_NUM_ILT_RECORDS_BB) / num_funcs;
*p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
break;
case QED_LL2_RAM_QUEUE:
*p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
break;
case QED_LL2_QUEUE:
*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
case QED_LL2_CTX_QUEUE:
*p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
......@@ -3701,8 +3744,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
*p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
......@@ -5087,11 +5129,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
min_pf_rate;
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
vport_params[i].wfq);
}
}
......@@ -5102,7 +5144,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
int i;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
......@@ -5118,7 +5160,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
qed_init_wfq_default_param(p_hwfn, min_pf_rate);
qed_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
vport_params[i].wfq);
}
}
......
......@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_GRC
};
/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
* source is a block of length DMAE_MAX_RW_SIZE and the
* destination is larger, the source block will be duplicated as
* many times as required to fill the destination block. This is
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
#define QED_DMAE_FLAG_PORT 0x00000010
#define QED_DMAE_FLAG_PF_SRC 0x00000020
#define QED_DMAE_FLAG_PF_DST 0x00000040
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
u8 port_id;
u8 src_pfid;
u8 dst_pfid;
};
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
......
......@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
goto err;
}
p_cxt = cxt_info.p_cxt;
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
......
......@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
......@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
p_params->src_pfid : p_hwfn->rel_pf_id;
opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT);
SET_FIELD(opcode, DMAE_CMD_SRC,
(is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
p_params->src_pfid : p_hwfn->rel_pf_id;
SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
p_params->dst_pfid : p_hwfn->rel_pf_id;
opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT);
SET_FIELD(opcode, DMAE_CMD_DST,
(is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
p_params->dst_pfid : p_hwfn->rel_pf_id;
SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
/* swapping mode 3 - big endian */
SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
p_params->port_id : p_hwfn->port_id;
opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
p_params->port_id : p_hwfn->port_id;
SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
} else {
opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT;
SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
} else {
opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
......
......@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
......@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data.b_valid[i] = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
......@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
u16 i, segment;
u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
......@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
p_valid[i] = false;
continue;
}
......@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
/* invalidate after writing */
for (j = i; j < i + segment; j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
i += segment;
}
......@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfb->temp_data,
* 4. fill_count
*/
params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
......@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
......@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
......@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
if (phase == PHASE_ENGINE &&
cmd->callback.callback_id == DMAE_READY_CB)
b_dmae = true;
break;
}
......@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
fw->fw_overlays = (u32 *)(data + offset);
len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
fw->fw_overlays_len = len;
return 0;
}
......@@ -80,14 +80,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_clear_rt_data - Clears the runtime init array.
*
*
* @param p_hwfn
*/
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
......
......@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->ll2_rx_queue_id =
p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
......@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
......@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
......@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];
......@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
......@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
......@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_empty;
p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
SET_FIELD(p_ramrod->flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
......@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
......@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
......
......@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
RESC_START(p_hwfn, QED_LL2_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
......@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
/* SYN will use ctx based queues */
data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
......@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
/* OOO/unaligned will use legacy ll2 queues (ram based) */
data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;
n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
......
......@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;
rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
rc = qed_dbg_grc_config(hwfn, cfg_id, val);
qed_ptt_release(hwfn, ptt);
......
......@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
......
......@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
......