/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static LIST_HEAD(xgmi_hive_list);

static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
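 * For example, assuming card0 is part of an XGMI hive, the IDs can be
 * read with:
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_device_id
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *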
 * Each device has its own xgmi_hive_info directory with a mirror
 * set of node sub-directories.
 *
 * The XGMI memory space is built by concatenating the power-of-two
 * padded VRAM space of each node into one contiguous address space.
 *
 */

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = S_IRUGO
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_attrs = amdgpu_xgmi_hive_attrs,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);

}

#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
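/*
 * Report the xGMI error count via sysfs.  The counters are read through the
 * DF indirect (FICAA) access interface: the control word is checked to
 * confirm the counters are enabled, bits 62 and 63 of the status word are
 * summed to form the error count, and the status is then cleared.
 */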
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");


	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);

}



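/*
 * amdgpu_get_xgmi_hive - look up (or create) the hive this device belongs to.
 *
 * The returned hive carries an extra kobject reference taken on behalf of
 * the caller, which must be dropped with amdgpu_put_xgmi_hive().  Returns
 * NULL if the device has no hive id or if a new hive cannot be allocated
 * and initialized.
 */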
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	if (!list_empty(&xgmi_hive_list)) {
		list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node)  {
			if (hive->hive_id == adev->gmc.xgmi.hive_id)
				goto pro_end;
		}
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		hive = NULL;
		goto pro_end;
	}

	/* initialize a new hive if one does not exist yet */
	ret = kobject_init_and_add(&hive->kobj,
			&amdgpu_xgmi_hive_type,
			&adev->dev->kobj,
			"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kfree(hive);
		hive = NULL;
		goto pro_end;
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->in_reset, 0);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	/*
	 * hive pstate on boot is high in vega20 so we have to go to low
	 * pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/* fw bug so temporarily disable pstate switching */
	return 0;
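	/*
	 * NOTE: the code below is currently unreachable because of the early
	 * return above; it is presumably kept for when the pstate switching
	 * workaround can be removed.
	 */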

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs single peer to request pstate high for the hive to
	 * go high but all peers must request pstate low for the hive to go low
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}


int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops;
	return	-EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
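		/*
		 * No PSP block to query, so fall back to fixed IDs derived
		 * from the physical node id.
		 */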
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update node list for other device in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* To do : continue with some node failed or disable the whole hive */
				goto exit_unlock;
			}
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return psp_xgmi_terminate(&adev->psp);
}

int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_xgmi_reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
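
	/*
	 * Each node owns one node_segment_size sized slice of the hive-wide
	 * address space; offset the node-local address by this node's slice.
	 */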
	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
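	/* write all ones and then zero to clear the latched error status bits */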
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	amdgpu_xgmi_reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}