// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_mbx.h"
#include "hclgevf_main.h"
#include "hnae3.h"

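/* hclgevf_resp_to_errno: convert the response code reported by the PF into a
 * negative errno value, 0 meaning success.
 */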
static int hclgevf_resp_to_errno(u16 resp_code)
{
	return resp_code ? -resp_code : 0;
}

static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
	/* this function should be called with mbx_resp.mbx_mutex held
	 * to protect the received_resp flag from race conditions
	 */
	hdev->mbx_resp.received_resp  = false;
	hdev->mbx_resp.origin_mbx_msg = 0;
	hdev->mbx_resp.resp_status    = 0;
	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
}

/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
 * message to PF.
 * @hdev: pointer to struct hclgevf_dev
 * @code0: the message code of the request this response belongs to
 * @code1: the message subcode of the request this response belongs to
 * @resp_data: buffer to store the response data copied from the PF (may be NULL)
 * @resp_len: length of @resp_data in bytes
 */
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
				u8 *resp_data, u16 resp_len)
{
#define HCLGEVF_MAX_TRY_TIMES	500
#define HCLGEVF_SLEEP_USECOND	1000
	struct hclgevf_mbx_resp_status *mbx_resp;
	u16 r_code0, r_code1;
	int i = 0;

	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"VF mbx response len(=%u) exceeds maximum(=%u)\n",
			resp_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
			return -EIO;

		usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
		i++;
	}

	if (i >= HCLGEVF_MAX_TRY_TIMES) {
		dev_err(&hdev->pdev->dev,
			"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
			code0, code1, hdev->mbx_resp.received_resp, i);
		return -EIO;
	}

	mbx_resp = &hdev->mbx_resp;
	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	hclgevf_reset_mbx_resp_status(hdev);

	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
		dev_err(&hdev->pdev->dev,
			"VF could not match resp code(code0=%u,code1=%u), %d\n",
			code0, code1, mbx_resp->resp_status);
		dev_err(&hdev->pdev->dev,
			"VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
			r_code0, r_code1);
		return -EIO;
	}

	return 0;
}

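/* hclgevf_send_mbx_msg: send a mailbox message from the VF to the PF.
 * @hdev: pointer to struct hclgevf_dev
 * @code: mailbox message code
 * @subcode: mailbox message subcode
 * @msg_data: optional payload copied after the code/subcode bytes (may be NULL)
 * @msg_len: payload length in bytes
 * @need_resp: true to send synchronously and wait for the PF's response
 * @resp_data: buffer for the response data (only used when @need_resp is true)
 * @resp_len: length of @resp_data in bytes
 *
 * Returns 0 on success or a negative errno value.
 *
 * Illustrative sketch of a synchronous query (the real callers live in
 * hclgevf_main.c and may use different codes, subcodes and buffer sizes):
 *
 *	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 *	int ret;
 *
 *	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
 *				   true, resp_msg, sizeof(resp_msg));
 */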
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
		dev_err(&hdev->pdev->dev,
			"VF send mbx msg fail, msg len %d exceeds max len %d\n",
			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
		return -EINVAL;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
					  ~HCLGE_MBX_NEED_RESP_BIT;
	req->msg[0] = code;
	req->msg[1] = subcode;
	if (msg_data)
		memcpy(&req->msg[2], msg_data, msg_len);

	/* synchronous send */
	if (need_resp) {
		mutex_lock(&hdev->mbx_resp.mbx_mutex);
		hclgevf_reset_mbx_resp_status(hdev);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to send mbx message to PF\n",
				status);
			mutex_unlock(&hdev->mbx_resp.mbx_mutex);
			return status;
		}

		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
					      resp_len);
		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
	} else {
		/* asynchronous send */
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to send mbx message to PF\n",
				status);
			return status;
		}
	}

	return status;
}

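/* hclgevf_cmd_crq_empty: return true when the CRQ has no new PF-to-VF
 * descriptors to process, i.e. the hardware tail pointer has caught up with
 * the driver's next_to_use index.
 */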
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
	u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

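/* hclgevf_mbx_handler: drain the PF-to-VF mailbox CRQ (the fast path).
 * Synchronous responses from the PF are recorded in hdev->mbx_resp right
 * away, while asynchronous notifications are queued on the ARQ and left for
 * the mailbox task to process later.
 */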
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
	struct hclgevf_mbx_resp_status *resp;
	struct hclge_mbx_pf_to_vf_cmd *req;
	struct hclgevf_cmq_ring *crq;
	struct hclgevf_desc *desc;
	u16 *msg_q;
	u16 flag;
	u8 *temp;
	int i;

	resp = &hdev->mbx_resp;
	crq = &hdev->hw.cmq.crq;

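	/* process every pending PF-to-VF descriptor on the CRQ */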
	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
			dev_info(&hdev->pdev->dev, "vf crq need init\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %u\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		/* Synchronous messages are time critical and need preferential
		 * treatment. Therefore we acknowledge all sync responses as
		 * quickly as possible so that waiting tasks do not time out,
		 * while async messages are queued for later processing in the
		 * context of the mailbox task, i.e. the slow path.
		 */
		switch (req->msg[0]) {
		case HCLGE_MBX_PF_VF_RESP:
			if (resp->received_resp)
				dev_warn(&hdev->pdev->dev,
					 "VF mbx resp flag not clear(%u)\n",
					 req->msg[1]);
			resp->received_resp = true;

			resp->origin_mbx_msg = (req->msg[1] << 16);
			resp->origin_mbx_msg |= req->msg[2];
			resp->resp_status = hclgevf_resp_to_errno(req->msg[3]);

			temp = (u8 *)&req->msg[4];
			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			break;
		case HCLGE_MBX_LINK_STAT_CHANGE:
		case HCLGE_MBX_ASSERTING_RESET:
		case HCLGE_MBX_LINK_STAT_MODE:
		case HCLGE_MBX_PUSH_VLAN_INFO:
		case HCLGE_MBX_PUSH_PROMISC_INFO:
			/* Mark this mbx event as pending. This is required
			 * because we might lose the interrupt event while the
			 * mbx task is busy handling. It is cleared when the
			 * mbx task enters its handling state.
			 */
			hdev->mbx_event_pending = true;

			/* drop the async msg if the ARQ is full and continue
			 * with the next message
			 */
			if (atomic_read(&hdev->arq.count) >=
			    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
				dev_warn(&hdev->pdev->dev,
					 "Async Q full, dropping msg(%u)\n",
					 req->msg[1]);
				break;
			}

			/* tail the async message in arq */
			msg_q = hdev->arq.msg_q[hdev->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
			hclge_mbx_tail_ptr_move_arq(hdev->arq);
			atomic_inc(&hdev->arq.count);

			hclgevf_mbx_task_schedule(hdev);

			break;
		default:
			dev_err(&hdev->pdev->dev,
				"VF received unsupported(%u) mbx msg from PF\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer; the M7 needs this pointer */
	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
			  crq->next_to_use);
}

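/* hclgevf_parse_promisc_info: log it when the host has turned promiscuous
 * mode off because this VF is not trusted.
 */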
static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
				       u16 promisc_info)
{
	if (!promisc_info)
		dev_info(&hdev->pdev->dev,
			 "Promisc mode is closed by host for being untrusted.\n");
}

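/* hclgevf_mbx_async_handler: process the asynchronous mailbox messages that
 * hclgevf_mbx_handler queued on the ARQ; this is the slow path, run from the
 * mailbox task.
 */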
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
	enum hnae3_reset_type reset_type;
	u16 link_status, state;
	u16 *msg_q, *vlan_info;
	u8 duplex;
	u32 speed;
	u32 tail;
	u8 idx;

	/* we can safely clear it now as we are at start of the async message
	 * processing
	 */
	hdev->mbx_event_pending = false;

	tail = hdev->arq.tail;

	/* process all the async queue messages */
	while (tail != hdev->arq.head) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
			dev_info(&hdev->pdev->dev,
				 "vf crq need init in async\n");
			return;
		}

		msg_q = hdev->arq.msg_q[hdev->arq.head];

		switch (msg_q[0]) {
		case HCLGE_MBX_LINK_STAT_CHANGE:
			link_status = msg_q[1];
			memcpy(&speed, &msg_q[2], sizeof(speed));
			duplex = (u8)msg_q[4];

			/* update upper layer with new link status */
			hclgevf_update_link_status(hdev, link_status);
			hclgevf_update_speed_duplex(hdev, speed, duplex);

			break;
		case HCLGE_MBX_LINK_STAT_MODE:
			idx = (u8)msg_q[1];
			if (idx)
				memcpy(&hdev->hw.mac.supported, &msg_q[2],
				       sizeof(unsigned long));
			else
				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
				       sizeof(unsigned long));
			break;
		case HCLGE_MBX_ASSERTING_RESET:
			/* PF has asserted reset, so the VF should enter the
			 * pending state and poll the hardware reset status
			 * until the reset has completed. After that, the
			 * stack should eventually be re-initialized.
			 */
			reset_type = (enum hnae3_reset_type)msg_q[1];
			set_bit(reset_type, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
			hclgevf_reset_task_schedule(hdev);

			break;
		case HCLGE_MBX_PUSH_VLAN_INFO:
			state = msg_q[1];
			vlan_info = &msg_q[1];
			hclgevf_update_port_base_vlan_info(hdev, state,
							   (u8 *)vlan_info, 8);
			break;
		case HCLGE_MBX_PUSH_PROMISC_INFO:
			hclgevf_parse_promisc_info(hdev, msg_q[1]);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"fetched unsupported(%u) message from arq\n",
				msg_q[0]);
			break;
		}

		hclge_mbx_head_ptr_move_arq(hdev->arq);
		atomic_dec(&hdev->arq.count);
		msg_q = hdev->arq.msg_q[hdev->arq.head];
	}
}