/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"

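/*
 * VF <-> PF mailbox helpers. send_ack/set_valid operate on the byte-wide
 * mailbox control registers (offsets defined in mxgpu_ai.h): writing 2 to the
 * RCV control byte sets RCV_MSG_ACK to acknowledge a message from the host,
 * and writing 1/0 to the TRN control byte raises/drops TRN_MSG_VALID for an
 * outgoing message.
 */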
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg should *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID is set
 * by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

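/*
 * Wait for the host to acknowledge the message we just sent: poll TRN_MSG_ACK
 * (bit 1 of the TRN control byte) every 5 ms until it is set or
 * AI_MAILBOX_POLL_ACK_TIMEDOUT ms have elapsed.
 */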
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

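/*
 * Poll the RCV mailbox for a specific event from the host, retrying every
 * 10 ms until AI_MAILBOX_POLL_MSG_TIMEDOUT ms have elapsed; a matching
 * message is acked inside xgpu_ai_mailbox_rcv_msg().
 */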
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

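/*
 * Send a request to the host: drop TRN_MSG_VALID and wait for the previous
 * TRN_MSG_ACK to clear, write the request and payload into TRN_DW0..DW3,
 * raise TRN_MSG_VALID, poll for the host's ack, then drop TRN_MSG_VALID
 * again.
 */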
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID so that the host clears its RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, the hardware automatically clears the
	 * VF's TRN_MSG_ACK. Otherwise the xgpu_ai_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
				data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
				data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
				data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

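/*
 * Send one of the access/init-data requests to the host and, where a reply is
 * expected, poll the RCV mailbox for the corresponding response message (the
 * checksum key is picked up from RCV_DW2 for init/reset access).
 */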
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to poll for the host's reply if the request is a GPU access request */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
		req == IDH_REQ_GPU_FINI_ACCESS ||
		req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	} else if (req == IDH_REQ_GPU_INIT_DATA) {
		/* Dummy REQ_GPU_INIT_DATA handling */
		r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
		/* version set to 0 since dummy */
		adev->virt.req_init_data_ver = 0;
	}

	return 0;
}

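/*
 * Request GPU reset access from the host, retrying up to
 * AI_MAILBOX_POLL_MSG_REP_MAX times if the host does not answer.
 */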
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

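/*
 * Request or release exclusive (full) GPU access around VF init/fini; the
 * request paths wait for IDH_READY_TO_ACCESS_GPU via
 * xgpu_ai_send_access_requests().
 */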
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

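/*
 * Mailbox ACK interrupt handler; the ack itself is consumed by the polling
 * path above, so there is nothing to do here beyond a debug trace.
 */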
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

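/*
 * Deferred FLR handling, scheduled from the RCV interrupt: take the reset
 * domain lock, stop data exchange, tell the host we are ready for the FLR
 * (IDH_READY_TO_RESET), wait up to AI_MAILBOX_POLL_FLR_TIMEDOUT ms for
 * FLR_NOTIFICATION_CMPL, then trigger GPU recovery if no TDR will.
 */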
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_device_gpu_recover() until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

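/*
 * Incoming-message interrupt handler: peek at RCV_DW0 and schedule the FLR
 * work for IDH_FLR_NOTIFICATION; IDH_QUERY_ALIVE is simply acked, and the
 * remaining messages are left to the polling paths.
 */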
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
		/* READY_TO_ACCESS_GPU is fetched by the kernel polling path, so
		 * the IRQ can ignore it here since the polling thread will
		 * handle it; other messages such as FLR complete are not
		 * handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

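/*
 * Hook up the two BIF mailbox interrupt sources: src id 135 for received
 * messages and src id 138 for transmit acks.
 */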
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

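/*
 * Ask the host for init data; with the dummy IDH_REQ_GPU_INIT_DATA handling
 * above this only waits for IDH_REQ_GPU_INIT_DATA_READY and leaves
 * req_init_data_ver at 0.
 */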
static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

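/* SR-IOV virtualization callbacks for AI (Vega10-class) VFs, used by the
 * common amdgpu virt code.
 */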
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.req_init_data  = xgpu_ai_request_init_data,
};