Commit a1c69520 authored by Rahul Lakkireddy, committed by David S. Miller

cxgb4: collect MC memory dump

Use meminfo to get base address and size of MC memory.  Also use same
meminfo for EDC memory dumps.
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 123e25c4
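For orientation, the change below sizes each memory dump from the availability ranges that cudbg_fill_meminfo() reports, rather than from dedicated per-memory registers. The following is a minimal standalone sketch of that idea in plain C; the struct, helper name and example addresses are invented for illustration and are not the driver's real cudbg_meminfo types. Each range carries a base, a limit and an index flag, and the dump size for a given memory is simply limit minus base of the matching range.

/* Simplified stand-ins for the driver's meminfo types; not the real
 * struct cudbg_meminfo.  The flag values mirror the new defines in this
 * patch: EDC0_FLAG 0, EDC1_FLAG 1, MC_FLAG 2, MC0_FLAG 3, MC1_FLAG 4.
 */
#include <stdio.h>

struct mem_range {
        unsigned long base;   /* start address of the region */
        unsigned long limit;  /* end address of the region */
        unsigned int idx;     /* which memory this range belongs to */
};

/* Return limit - base of the range tagged with 'flag', or -1 if the card
 * does not have that memory.  This mirrors cudbg_meminfo_get_mem_index()
 * followed by the size computation in cudbg_collect_mem_region().
 */
static long region_size(const struct mem_range *avail, int n, unsigned int flag)
{
        int i;

        for (i = 0; i < n; i++)
                if (avail[i].idx == flag)
                        return (long)(avail[i].limit - avail[i].base);
        return -1;
}

int main(void)
{
        /* Hypothetical availability ranges: 256 MB of EDC0, 1 GB of MC0. */
        struct mem_range avail[] = {
                { 0x00000000UL, 0x10000000UL, 0 },  /* EDC0_FLAG */
                { 0x10000000UL, 0x50000000UL, 3 },  /* MC0_FLAG  */
        };

        printf("MC0 dump size: %ld bytes\n", region_size(avail, 2, 3));
        return 0;
}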
@@ -18,17 +18,14 @@
 #ifndef __CUDBG_ENTITY_H__
 #define __CUDBG_ENTITY_H__
 
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
 
 #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
 
-struct card_mem {
-        u16 size_edc0;
-        u16 size_edc1;
-        u16 mem_flag;
-};
-
 struct cudbg_mbox_log {
         struct mbox_cmd entry;
         u32 hi[MBOX_LEN / 8];
...
@@ -47,6 +47,8 @@ enum cudbg_dbg_entity_type {
         CUDBG_CIM_OBQ_NCSI = 17,
         CUDBG_EDC0 = 18,
         CUDBG_EDC1 = 19,
+        CUDBG_MC0 = 20,
+        CUDBG_MC1 = 21,
         CUDBG_RSS = 22,
         CUDBG_RSS_VF_CONF = 25,
         CUDBG_PATH_MTU = 27,
...
@@ -682,6 +682,42 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
 }
 
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+                                       struct cudbg_meminfo *mem_info,
+                                       u8 mem_type, u8 *idx)
+{
+        u8 i, flag;
+
+        switch (mem_type) {
+        case MEM_EDC0:
+                flag = EDC0_FLAG;
+                break;
+        case MEM_EDC1:
+                flag = EDC1_FLAG;
+                break;
+        case MEM_MC0:
+                /* Some T5 cards have both MC0 and MC1. */
+                flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+                break;
+        case MEM_MC1:
+                flag = MC1_FLAG;
+                break;
+        default:
+                return CUDBG_STATUS_ENTITY_NOT_FOUND;
+        }
+
+        for (i = 0; i < mem_info->avail_c; i++) {
+                if (mem_info->avail[i].idx == flag) {
+                        *idx = i;
+                        return 0;
+                }
+        }
+
+        return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff, u8 mem_type,
                              unsigned long tot_len,
@@ -690,10 +726,20 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
         unsigned long bytes, bytes_left, bytes_read = 0;
         struct adapter *padap = pdbg_init->adap;
         struct cudbg_buffer temp_buff = { 0 };
+        u32 yield_count = 0;
         int rc = 0;
 
         bytes_left = tot_len;
         while (bytes_left > 0) {
+                /* As MC size is huge and read through PIO access, this
+                 * loop will hold cpu for a longer time. OS may think that
+                 * the process is hanged and will generate CPU stall traces.
+                 * So yield the cpu regularly.
+                 */
+                yield_count++;
+                if (!(yield_count % CUDBG_YIELD_ITERATION))
+                        schedule();
+
                 bytes = min_t(unsigned long, bytes_left,
                               (unsigned long)CUDBG_CHUNK_SIZE);
                 rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
@@ -717,27 +763,6 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
         return rc;
 }
 
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
-                                   struct card_mem *mem_info)
-{
-        struct adapter *padap = pdbg_init->adap;
-        u32 value;
-
-        value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
-        value = EDRAM0_SIZE_G(value);
-        mem_info->size_edc0 = (u16)value;
-
-        value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
-        value = EDRAM1_SIZE_G(value);
-        mem_info->size_edc1 = (u16)value;
-
-        value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
-        if (value & EDRAM0_ENABLE_F)
-                mem_info->mem_flag |= (1 << EDC0_FLAG);
-        if (value & EDRAM1_ENABLE_F)
-                mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
                              struct cudbg_error *cudbg_err)
 {
@@ -757,37 +782,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
                                     struct cudbg_error *cudbg_err,
                                     u8 mem_type)
 {
-        struct card_mem mem_info = {0};
-        unsigned long flag, size;
+        struct adapter *padap = pdbg_init->adap;
+        struct cudbg_meminfo mem_info;
+        unsigned long size;
+        u8 mc_idx;
         int rc;
 
+        memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+        rc = cudbg_fill_meminfo(padap, &mem_info);
+        if (rc)
+                return rc;
+
         cudbg_t4_fwcache(pdbg_init, cudbg_err);
-        cudbg_collect_mem_info(pdbg_init, &mem_info);
-        switch (mem_type) {
-        case MEM_EDC0:
-                flag = (1 << EDC0_FLAG);
-                size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
-                break;
-        case MEM_EDC1:
-                flag = (1 << EDC1_FLAG);
-                size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
-                break;
-        default:
-                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-                goto err;
-        }
-
-        if (mem_info.mem_flag & flag) {
-                rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
-                                       size, cudbg_err);
-                if (rc)
-                        goto err;
-        } else {
-                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-                goto err;
-        }
-
-err:
-        return rc;
+        rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+        if (rc)
+                return rc;
+
+        size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+        return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+                                 cudbg_err);
 }
 
 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -806,6 +819,22 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
                                         MEM_EDC1);
 }
 
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+                              struct cudbg_buffer *dbg_buff,
+                              struct cudbg_error *cudbg_err)
+{
+        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+                                        MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+                              struct cudbg_buffer *dbg_buff,
+                              struct cudbg_error *cudbg_err)
+{
+        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+                                        MEM_MC1);
+}
+
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
                       struct cudbg_buffer *dbg_buff,
                       struct cudbg_error *cudbg_err)
...
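A side note on the cudbg_lib.c hunks above: because MC is large and is read through slow PIO access, cudbg_read_fw_mem() now yields the CPU every CUDBG_YIELD_ITERATION chunks so that the kernel does not report CPU stall traces. The following is a rough standalone sketch of that yield-every-N-iterations pattern; it uses sched_yield() in place of the kernel's schedule(), and the chunk and buffer sizes are invented for illustration.

/* Standalone sketch of the yield-every-N-iterations pattern added to
 * cudbg_read_fw_mem().  sched_yield() stands in for the kernel's
 * schedule(); chunk and buffer sizes are invented for illustration.
 */
#include <sched.h>
#include <stddef.h>
#include <string.h>

#define YIELD_ITERATION 256   /* same spirit as CUDBG_YIELD_ITERATION */
#define CHUNK_SIZE      4096  /* hypothetical per-iteration chunk */

/* Placeholder for the slow register-by-register (PIO) read. */
static void copy_chunk(unsigned char *dst, size_t len)
{
        memset(dst, 0, len);
}

static void read_large_region(unsigned char *dst, size_t total)
{
        unsigned int yield_count = 0;
        size_t done = 0;

        while (done < total) {
                size_t bytes = total - done;

                if (bytes > CHUNK_SIZE)
                        bytes = CHUNK_SIZE;

                /* Give the CPU up regularly so a huge region does not
                 * hog it and trigger stall warnings.
                 */
                if (!(++yield_count % YIELD_ITERATION))
                        sched_yield();

                copy_chunk(dst + done, bytes);
                done += bytes;
        }
}

int main(void)
{
        static unsigned char buf[1 << 20]; /* 1 MB stand-in for an MC dump */

        read_large_region(buf, sizeof(buf));
        return 0;
}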
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+                              struct cudbg_buffer *dbg_buff,
+                              struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+                              struct cudbg_buffer *dbg_buff,
+                              struct cudbg_error *cudbg_err);
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
                       struct cudbg_buffer *dbg_buff,
                       struct cudbg_error *cudbg_err);
...
@@ -22,6 +22,8 @@
 static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
         { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
         { CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+        { CUDBG_MC0, cudbg_collect_mc0_meminfo },
+        { CUDBG_MC1, cudbg_collect_mc1_meminfo },
 };
 
 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -158,6 +160,22 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
                 }
                 len = cudbg_mbytes_to_bytes(len);
                 break;
+        case CUDBG_MC0:
+                value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+                if (value & EXT_MEM0_ENABLE_F) {
+                        value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+                        len = EXT_MEM0_SIZE_G(value);
+                }
+                len = cudbg_mbytes_to_bytes(len);
+                break;
+        case CUDBG_MC1:
+                value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+                if (value & EXT_MEM1_ENABLE_F) {
+                        value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+                        len = EXT_MEM1_SIZE_G(value);
+                }
+                len = cudbg_mbytes_to_bytes(len);
+                break;
         case CUDBG_RSS:
                 len = RSS_NENTRIES * sizeof(u16);
                 break;
...
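The cxgb4_get_entity_length() cases added above derive the MC dump length from two registers: an enable bit indicating that the external memory is present, and a BAR field holding its size in megabytes. Below is a self-contained sketch of that lookup; every register address, bit position and canned value here is invented for illustration, whereas the real driver reads MA_TARGET_MEM_ENABLE_A and MA_EXT_MEMORY0_BAR_A and uses EXT_MEM0_ENABLE_F and EXT_MEM0_SIZE_G.

/* Self-contained sketch of the MC length lookup added to
 * cxgb4_get_entity_length().  All register addresses, bit positions and
 * canned values are invented for illustration.
 */
#include <stdio.h>

#define FAKE_MEM_ENABLE_REG   0x1000u          /* hypothetical enable register */
#define FAKE_MEM_BAR_REG      0x1004u          /* hypothetical size register   */
#define FAKE_EXT_MEM0_ENABLE  (1u << 2)        /* hypothetical MC0-present bit */
#define FAKE_EXT_MEM0_SIZE(x) ((x) & 0xffffu)  /* hypothetical size field (MB) */

/* Stand-in for t4_read_reg(): returns canned values for the two
 * hypothetical registers above.
 */
static unsigned int fake_read_reg(unsigned int addr)
{
        if (addr == FAKE_MEM_ENABLE_REG)
                return FAKE_EXT_MEM0_ENABLE;   /* pretend MC0 is enabled */
        if (addr == FAKE_MEM_BAR_REG)
                return 1024;                   /* pretend a 1024 MB size */
        return 0;
}

static unsigned long mc0_dump_len(void)
{
        unsigned int value = fake_read_reg(FAKE_MEM_ENABLE_REG);
        unsigned long len = 0;

        if (value & FAKE_EXT_MEM0_ENABLE) {
                value = fake_read_reg(FAKE_MEM_BAR_REG);
                len = FAKE_EXT_MEM0_SIZE(value);
        }
        /* Megabytes to bytes, in the spirit of cudbg_mbytes_to_bytes(). */
        return len << 20;
}

int main(void)
{
        printf("MC0 dump length: %lu bytes\n", mc0_dump_len());
        return 0;
}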