Commit b2c1b30e authored by David S. Miller

Merge branch 'thunderx-fixes'

Sunil Goutham says:

====================
net: thunderx: Miscellaneous fixes

This 2-patch series fixes issues w.r.t. physical link status
reporting and transmit datapath configuration for
secondary Qsets.

Changes from v1:
Fixed lmac disable sequence for interfaces of type SGMII.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        u32 rr_quantum;
        u8 sq_idx = sq->sq_num;
        u8 pqs_vnic;
+       int svf;

        if (sq->sqs_mode)
                pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        /* 24 bytes for FCS, IPG and preamble */
        rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

-       tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       if (!sq->sqs_mode) {
+               tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       } else {
+               for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+                       if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+                               break;
+               }
+               tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+               tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+               tl4 += (svf * NIC_TL4_PER_LMAC);
+               tl4 += (bgx * NIC_TL4_PER_BGX);
+       }
        tl4 += sq_idx;
-       if (sq->sqs_mode)
-               tl4 += vnic * 8;

        tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
        nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
......
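As a side note on the TL4 change above, here is a minimal, self-contained sketch of the index arithmetic. It is not driver code; the constant values are hypothetical placeholders (only the macro names and the structure of the calculation mirror the hunk), chosen just to show how a secondary Qset now lands in its own TL4 block rather than relying on the removed vnic * 8 offset.

/* Illustrative userspace sketch of the reworked TL4 selection; the constant
 * values below are hypothetical placeholders, not the driver's real ones.
 */
#include <stdio.h>

#define NIC_TL4_PER_LMAC        8       /* hypothetical value */
#define NIC_TL4_PER_BGX         64      /* hypothetical value */
#define MAX_LMAC_PER_BGX        4       /* hypothetical value */
#define MAX_SQS_PER_VF          3       /* hypothetical value */

static int tl4_index(int bgx, int lmac, int sq_idx, int sqs_mode, int svf)
{
        int tl4;

        if (!sqs_mode) {
                /* Primary Qset: one TL4 block per LMAC, as before */
                tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
        } else {
                /* Secondary Qset: skip past all primary blocks, then index
                 * by LMAC and by the Qset's slot (svf) under its primary VF
                 */
                tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
                tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
                tl4 += (svf * NIC_TL4_PER_LMAC);
                tl4 += (bgx * NIC_TL4_PER_BGX);
        }
        return tl4 + sq_idx;
}

int main(void)
{
        /* SQ 0 of the primary Qset on LMAC 1 vs. SQ 0 of its second
         * secondary Qset (svf = 1) on the same LMAC
         */
        printf("primary Qset:   TL4 %d\n", tl4_index(0, 1, 0, 0, 0));
        printf("secondary Qset: TL4 %d\n", tl4_index(0, 1, 0, 1, 1));
        return 0;
}

With these placeholder values the two Qsets map to distinct TL4 entries (8 and 64), whereas the old vnic * 8 offset did not guarantee that a secondary Qset stayed inside a range reserved for its own VF.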
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
        }

        /* Clear rcvflt bit (latching high) and read it back */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
        if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
                dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
                if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }

-       /* Wait for MAC RX to be ready */
-       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
-                        SMU_RX_CTL_STATUS, true)) {
-               dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
-               return -1;
-       }
-
        /* Wait for BGX RX to be idle */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
                dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }

-       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
-               dev_err(&bgx->pdev->dev, "Receive fault\n");
-               return -1;
-       }
-
-       /* Receive link is latching low. Force it high and verify it */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
-       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
-                        SPU_STATUS1_RCV_LNK, false)) {
-               dev_err(&bgx->pdev->dev, "SPU receive link down\n");
-               return -1;
-       }
-
+       /* Clear receive packet disable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
        cfg &= ~SPU_MISC_CTL_RX_DIS;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-       return 0;
+
+       /* Check for MAC RX faults */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+       /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+       cfg &= SMU_RX_CTL_STATUS;
+       if (!cfg)
+               return 0;
+
+       /* Rx local/remote fault seen.
+        * Do lmac reinit to see if condition recovers
+        */
+       bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+       return -1;
 }

 static void bgx_poll_for_link(struct work_struct *work)
 {
        struct lmac *lmac;
-       u64 link;
+       u64 spu_link, smu_link;

        lmac = container_of(work, struct lmac, dwork.work);
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
        bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
                     SPU_STATUS1_RCV_LNK, false);

-       link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
-       if (link & SPU_STATUS1_RCV_LNK) {
+       spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+       smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+       if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+           !(smu_link & SMU_RX_CTL_STATUS)) {
                lmac->link_up = 1;
                if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
                        lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
        }

        if (lmac->last_link != lmac->link_up) {
+               if (lmac->link_up) {
+                       if (bgx_xaui_check_link(lmac)) {
+                               /* Errors, clear link_up state */
+                               lmac->link_up = 0;
+                               lmac->last_speed = SPEED_UNKNOWN;
+                               lmac->last_duplex = DUPLEX_UNKNOWN;
+                       }
+               }
                lmac->last_link = lmac->link_up;
-               if (lmac->link_up)
-                       bgx_xaui_check_link(lmac);
        }

        queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
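Taken together, the two hunks above mean the link is reported up only when the SPU receive-link bit and the SMU RX status agree, and a 0-to-1 transition is re-validated by bgx_xaui_check_link() before it is latched. A hypothetical helper (not a function in the driver) that captures the predicate, assuming the driver's existing SPU_STATUS1_RCV_LNK and SMU_RX_CTL_STATUS masks:

/* Hypothetical helper, for illustration only (not part of the driver):
 * "link up" requires the SPU receive link bit to be set AND the SMU RX
 * status field to report no fault (0 - okay, 1 - local, 2 - remote fault).
 */
static bool xaui_link_is_up(u64 spu_status1, u64 smu_rx_ctl)
{
        bool spu_rx_link_ok = !!(spu_status1 & SPU_STATUS1_RCV_LNK);
        bool smu_fault_seen = !!(smu_rx_ctl & SMU_RX_CTL_STATUS);

        return spu_rx_link_ok && !smu_fault_seen;
}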
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 {
        struct lmac *lmac;
-       u64 cmrx_cfg;
+       u64 cfg;

        lmac = &bgx->lmac[lmacid];
        if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
                destroy_workqueue(lmac->check_link);
        }

-       cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-       cmrx_cfg &= ~(1 << 15);
-       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+       /* Disable packet reception */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_RX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Give chance for Rx/Tx FIFO to get drained */
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+       /* Disable packet transmission */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_TX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Disable serdes lanes */
+       if (!lmac->is_sgmii)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+       else
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+       /* Disable LMAC */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
        bgx_flush_dmac_addrs(bgx, lmacid);

        if ((bgx->lmac_type != BGX_MODE_XFI) &&
......
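The disable path above now drains the hardware before tearing the LMAC down: stop packet reception, poll the new FIFO-length registers until the RX (mask 0x1FFF) and TX (mask 0x3FFF) occupancy fields read zero, stop transmission, power down the serdes lanes (SPU low power for non-SGMII, PCS power-down for SGMII), and only then clear CMR_EN. The drain relies on bgx_poll_reg() waiting on a masked field; below is a rough, hypothetical sketch of such a poll-with-timeout helper (the driver's real bgx_poll_reg() may differ in timeout, delay and return convention).

/* Hypothetical poll-with-timeout sketch, shown only to illustrate the FIFO
 * drain step; not the driver's actual bgx_poll_reg() implementation.
 */
static int poll_reg_until(struct bgx *bgx, u8 lmacid, u64 reg,
                          u64 mask, bool wait_for_zero)
{
        int timeout = 200;
        u64 val;

        while (timeout--) {
                val = bgx_reg_read(bgx, lmacid, reg);
                if (wait_for_zero && !(val & mask))
                        return 0;       /* masked field drained to zero */
                if (!wait_for_zero && (val & mask))
                        return 0;       /* waited-for bit(s) became set */
                usleep_range(1000, 2000);
        }
        return 1;       /* gave up after the timeout */
}

Called with wait_for_zero = true and the FIFO-length masks, such a helper returns only once the hardware reports both FIFOs empty (or the timeout expires), which gives frames already accepted by the MAC a chance to drain before transmission is shut off.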
@@ -41,6 +41,7 @@
 #define BGX_CMRX_RX_STAT10              0xC0
 #define BGX_CMRX_RX_BP_DROP             0xC8
 #define BGX_CMRX_RX_DMAC_CTL            0x0E8
+#define BGX_CMRX_RX_FIFO_LEN            0x108
 #define BGX_CMR_RX_DMACX_CAM            0x200
 #define  RX_DMACX_CAM_EN                BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)         (x << 49)
@@ -50,6 +51,7 @@
 #define BGX_CMR_CHAN_MSK_AND            0x450
 #define BGX_CMR_BIST_STATUS             0x460
 #define BGX_CMR_RX_LMACS                0x468
+#define BGX_CMRX_TX_FIFO_LEN            0x518
 #define BGX_CMRX_TX_STAT0               0x600
 #define BGX_CMRX_TX_STAT1               0x608
 #define BGX_CMRX_TX_STAT2               0x610
......