提交 48527e52 编写于 作者: M Monk Liu 提交者: Alex Deucher

drm/amdgpu: refactoring mailbox to fix TDR handshake bugs(v2)

this patch refactors the mailbox implementation; all of the changes
below are needed together to fix the mailbox handshake issues
exposed by heavy TDR testing.

1) refactor all mailbox functions to use byte accesses for mb_control.
The reason is to avoid touching unrelated bits when writing the trn/rcv part of
mailbox_control; this way some incorrect INTRs sent to the hypervisor
side can be avoided, and it fixes a couple of handshake bugs.

2) trans_msg function re-implemented: put an invalidate
step before transmitting a message to make sure the ACK bit is in
a cleared state, otherwise there is a chance that ACK is already asserted
before the message is transmitted, leading to a fake ACK poll result.
(the hypervisor side has some tricks to work around the ACK bit being corrupted
by VF FLR, which has a side effect that may wrongly leave the guest-side ACK bit
asserted), and clear the TRANS_MSG words after the message is transferred.

3) mailbox_flr_work is also re-worked: it takes the mutex lock
first when invoked, to block gpu recovery from participating too early while
the hypervisor side is doing the VF FLR. (the hypervisor sends FLR_NOTIFY to the
guest before doing the VF FLR and sends FLR_COMPLETE after the VF FLR is done, and
the FLR_NOTIFY triggers an interrupt to the guest which leads to
mailbox_flr_work being invoked)

This avoids the issue of the mailbox trans msg being cleared by its VF FLR.

4) the mailbox_rcv_irq IRQ routine should only peek the msg and schedule
mailbox_flr_work, instead of ACKing the hypervisor itself, because the FLR_NOTIFY
msg sent from the hypervisor side doesn't need the VF's ACK (this is because
the VF's ACK would lead the hypervisor to clear its trans_valid/msg, and this
would cause a handshake bug if trans_valid/msg were cleared not by a
correct VF ACK but by a wrong VF ACK like this "FLR_NOTIFY" one)

This fixes the handshake bug where the guest sometimes could never receive
the "READY_TO_ACCESS_GPU" msg from the hypervisor.

5) separate polling time limits accordingly:
POLL ACK costs no more than 500ms
POLL MSG costs no more than 12000ms
POLL FLR finish costs no more than 500ms

6) we still need to put adev into in_gpu_reset mode after we receive
FLR_NOTIFY from the host side; this prevents an innocent app from wrongly
succeeding in opening the amdgpu dri device.

FLR_NOTIFY is received due to an IDLE hang detected on the hypervisor side,
which indicates the GPU is already dead in this VF.

v2:
use a MACRO as the offset of the mailbox_control register
don't test for the NOTIFY_CMPL event in rcv_msg since it won't
receive that message anymore
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Pixel Ding <Pixel.Ding@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
上级 421a2a30
...@@ -33,56 +33,34 @@ ...@@ -33,56 +33,34 @@
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{ {
u32 reg; WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
int timeout = AI_MAILBOX_TIMEDOUT;
u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL));
reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL), reg);
/*Wait for RCV_MSG_VALID to be 0*/
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL));
while (reg & mask) {
if (timeout <= 0) {
pr_err("RCV_MSG_VALID is not cleared\n");
break;
}
mdelay(1);
timeout -=1;
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL));
}
} }
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{ {
u32 reg; WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, /*
mmBIF_BX_PF0_MAILBOX_CONTROL)); * this peek_msg could *only* be called in IRQ routine becuase in IRQ routine
reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, * RCV_MSG_VALID filed of BIF_BX_PF0_MAILBOX_CONTROL must already be set to 1
TRN_MSG_VALID, val ? 1 : 0); * by host.
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL), *
reg); * if called no in IRQ routine, this peek_msg cannot guaranteed to return the
* correct value since it doesn't return the RCV_DW0 under the case that
* RCV_MSG_VALID is set by host.
*/
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
} }
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event) enum idh_event event)
{ {
u32 reg; u32 reg;
u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);
if (event != IDH_FLR_NOTIFICATION_CMPL) {
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL));
if (!(reg & mask))
return -ENOENT;
}
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0)); mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
...@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, ...@@ -94,54 +72,67 @@ static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
return 0; return 0;
} }
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
static int xgpu_ai_poll_ack(struct amdgpu_device *adev) static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{ {
int r = 0, timeout = AI_MAILBOX_TIMEDOUT; int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK); u8 reg;
u32 reg;
do {
reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
if (reg & 2)
return 0;
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_CONTROL));
while (!(reg & mask)) {
if (timeout <= 0) {
pr_err("Doesn't get ack from pf.\n");
r = -ETIME;
break;
}
mdelay(5); mdelay(5);
timeout -= 5; timeout -= 5;
} while (timeout > 1);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
mmBIF_BX_PF0_MAILBOX_CONTROL));
}
return r; return -ETIME;
} }
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{ {
int r = 0, timeout = AI_MAILBOX_TIMEDOUT; int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
do {
r = xgpu_ai_mailbox_rcv_msg(adev, event); r = xgpu_ai_mailbox_rcv_msg(adev, event);
while (r) { if (!r)
if (timeout <= 0) { return 0;
pr_err("Doesn't get msg:%d from pf.\n", event);
r = -ETIME;
break;
}
mdelay(5);
timeout -= 5;
r = xgpu_ai_mailbox_rcv_msg(adev, event); msleep(10);
} timeout -= 10;
} while (timeout > 1);
return r; pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
} }
static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3) { enum idh_request req, u32 data1, u32 data2, u32 data3) {
u32 reg; u32 reg;
int r; int r;
uint8_t trn;
/* IMPORTANT:
* clear TRN_MSG_VALID valid to clear host's RCV_MSG_ACK
* and with host's RCV_MSG_ACK cleared hw automatically clear host's RCV_MSG_ACK
* which lead to VF's TRN_MSG_ACK cleared, otherwise below xgpu_ai_poll_ack()
* will return immediatly
*/
do {
xgpu_ai_mailbox_set_valid(adev, false);
trn = xgpu_ai_peek_ack(adev);
if (trn) {
pr_err("trn=%x ACK should not asssert! wait again !\n", trn);
msleep(1);
}
} while(trn);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
...@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) ...@@ -245,15 +236,36 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{ {
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
int locked;
/* wait until RCV_MSG become 3 */ /* block amdgpu_gpu_recover till msg FLR COMPLETE received,
if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { * otherwise the mailbox msg will be ruined/reseted by
pr_err("failed to recieve FLR_CMPL\n"); * the VF FLR.
return; *
} * we can unlock the lock_reset to allow "amdgpu_job_timedout"
* to run gpu_recover() after FLR_NOTIFICATION_CMPL received
* which means host side had finished this VF's FLR.
*/
locked = mutex_trylock(&adev->lock_reset);
if (locked)
adev->in_gpu_reset = 1;
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
/* Trigger recovery due to world switch failure */ msleep(10);
amdgpu_device_gpu_recover(adev, NULL, false); timeout -= 10;
} while (timeout > 1);
flr_done:
if (locked)
mutex_unlock(&adev->lock_reset);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_lockup_timeout == 0)
amdgpu_device_gpu_recover(adev, NULL, true);
} }
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
...@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, ...@@ -274,24 +286,22 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
int r; enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
/* trigger gpu-reset by hypervisor only if TDR disbaled */
if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
/* sometimes the interrupt is delayed to inject to VM, so under such case
* the IDH_FLR_NOTIFICATION is overwritten by VF FLR from GIM side, thus
* above recieve message could be failed, we should schedule the flr_work
* anyway
*/
if (r) {
DRM_ERROR("FLR_NOTIFICATION is missed\n");
xgpu_ai_mailbox_send_ack(adev);
}
switch (event) {
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work); schedule_work(&adev->virt.flr_work);
break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it byfar since that polling thread will handle it,
* other msg like flr complete is not handled here.
*/
case IDH_CLR_MSG_BUF:
case IDH_FLR_NOTIFICATION_CMPL:
case IDH_READY_TO_ACCESS_GPU:
default:
break;
} }
return 0; return 0;
......
...@@ -24,7 +24,9 @@ ...@@ -24,7 +24,9 @@
#ifndef __MXGPU_AI_H__ #ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__ #define __MXGPU_AI_H__
#define AI_MAILBOX_TIMEDOUT 12000 #define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
enum idh_request { enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1, IDH_REQ_GPU_INIT_ACCESS = 1,
...@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev); ...@@ -51,4 +53,7 @@ int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev); int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev); void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
#define AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4
#define AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL) * 4 + 1
#endif #endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册