/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

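/*
 * Global list of all XGMI hives in the system; additions and removals are
 * serialized by xgmi_mutex.
 */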
static LIST_HEAD(xgmi_hive_list);

static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per-GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirror
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously appending the
 * power-of-two padded VRAM space of each node, one after another.
 *
 */
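
/*
 * A quick illustration of the layout described above (the card number is
 * hypothetical and system dependent):
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_device_id
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *
 * Both IDs are printed as 64-bit decimal values, and the per-node links
 * show up as node${nodeno} entries under xgmi_hive_info.
 */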

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = S_IRUGO
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_attrs = amdgpu_xgmi_hive_attrs,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);

}

#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

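	/*
	 * The error count below is only derived when the low 16 bits of the
	 * status word read back as 2; it is then the sum of the two flag
	 * bits in fica_out[63:62].
	 */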
	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");


	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);

}



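/*
 * Look up the hive this device belongs to on xgmi_hive_list, creating and
 * registering it on first use. The hive is returned with an extra kobject
 * reference held, which callers drop with amdgpu_put_xgmi_hive().
 */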
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node)  {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		hive = NULL;
		goto pro_end;
	}

	/* initialize a new hive if one does not already exist */
	ret = kobject_init_and_add(&hive->kobj,
			&amdgpu_xgmi_hive_type,
			&adev->dev->kobj,
			"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kfree(hive);
		hive = NULL;
		goto pro_end;
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->in_reset, 0);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	/*
	 * The hive pstate on boot is high in vega20, so we have to go to low
	 * pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/* fw bug: temporarily disable pstate switching */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs single peer to request pstate high for the hive to
	 * go high but all peers must request pstate low for the hive to go low
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}


/*
 * NOTE psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
 */
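/*
 * For example, a num_hops value of 0x41 (binary 0100 0001) decodes, per the
 * layout above, to link type 1 (xGMI3) with 1 hop to the peer node.
 */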
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return	-EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* TODO: decide whether to continue with some nodes failed or to disable the whole hive */
				goto exit_unlock;
			}
		}
	}

	if (!ret && !adev->gmc.xgmi.pending_reset)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return psp_xgmi_terminate(&adev->psp);
}

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}
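
/*
 * Illustrative numbers only: with a hypothetical node_segment_size of
 * 0x400000000 (16 GB) and physical_node_id 2, a node-local address of
 * 0x1000 maps to the hive-relative address 0x800001000.
 */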

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}

const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
	.ras_late_init = amdgpu_xgmi_ras_late_init,
	.ras_fini = amdgpu_xgmi_ras_fini,
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
};