/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "df/df_3_6_offset.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count;

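/*
 * Despite its name, this helper takes no lock; it simply returns the
 * address of the hive's device list, which callers appear to use as a
 * stable, opaque per-hive token.
 */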
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously appending each
 * node's power-of-two padded VRAM space to that of the other nodes.
 *
 */
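/*
 * A hypothetical shell session (the card/node numbers and ID values
 * below are illustrative only; real IDs are 64-bit numbers obtained
 * from the PSP):
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   4689125805121702248
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/node1/xgmi_device_id
 *   13806262561166101587
 */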


static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive =
			container_of(attr, struct amdgpu_hive_info, dev_attr);

	return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
}

static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	int ret = 0;

	if (WARN_ON(hive->kobj))
		return -EINVAL;

	hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
	if (!hive->kobj) {
		dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
		return -EINVAL;
	}

	hive->dev_attr = (struct device_attribute) {
		.attr = {
			.name = "xgmi_hive_id",
			.mode = S_IRUGO,
		},
		.show = amdgpu_xgmi_show_hive_id,
	};

	ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
		kobject_del(hive->kobj);
		kobject_put(hive->kobj);
		hive->kobj = NULL;
	}

	return ret;
}

static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
				    struct amdgpu_hive_info *hive)
{
	sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
	kobject_del(hive->kobj);
	kobject_put(hive->kobj);
	hive->kobj = NULL;
}

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
}

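/*
 * FICAA presumably refers to the data fabric's Fabric Indirect Config
 * Access Address mechanism; the macro below ORs a register offset into
 * a fixed command word consumed by the DF indirect-access callbacks.
 */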
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

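	/*
	 * A control word other than 0x1f suggests the error counters were
	 * never enabled.  When the low 16 bits of the status word read
	 * back as 2, bits 62 and 63 are summed to form the reported count;
	 * this reading of the bit layout is inferred from the arithmetic
	 * below, not from a documented register description.
	 */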
	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
}


static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");


	/* Create sysfs link to the hive info folder on all but the first device */
	if (adev != hive->adev) {
		ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", hive->number_devices);
	/* Create a sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;


remove_link:
	sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10] = { 0 };

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	/* The links were created as "xgmi_hive_info" and "node%d", so
	 * they must be removed under those names. */
	if (adev != hive->adev)
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
	sprintf(node, "node%d", hive->number_devices);
	sysfs_remove_link(hive->kobj, node);
}



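/*
 * Look up the hive matching adev's hive_id, creating a new entry in the
 * static xgmi_hives[] table if none exists yet.  When @lock is non-zero
 * the hive is returned with hive->hive_lock held and the caller is
 * responsible for releasing it.
 */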
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	mutex_lock(&xgmi_mutex);

	for (i = 0 ; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
			if (lock)
				mutex_lock(&tmp->hive_lock);
			mutex_unlock(&xgmi_mutex);
			return tmp;
		}
	}

	if (i >= AMDGPU_MAX_XGMI_HIVE) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	/* initialize a new hive if none exists yet */
	tmp = &xgmi_hives[hive_count++];

	if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	tmp->adev = adev;
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	mutex_init(&tmp->hive_lock);
	mutex_init(&tmp->reset_lock);

	if (lock)
		mutex_lock(&tmp->hive_lock);
	tmp->pstate = -1;
	mutex_unlock(&xgmi_mutex);

	return tmp;
}

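/*
 * Request a new xGMI link pstate for a device.  The hive's pstate is
 * only updated once every device in the hive has reached the requested
 * state, with the exception of the high-pstate request on Vega20,
 * which is propagated to the hive immediately.
 */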
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
	struct amdgpu_device *tmp_adev;
	bool update_hive_pstate = true;
	bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;

	if (!hive)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (hive->pstate == pstate) {
		adev->pstate = is_high_pstate ? pstate : adev->pstate;
		goto out;
	}

	dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);

	if (is_support_sw_smu_xgmi(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	if (ret) {
		dev_err(adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	/* Update device pstate */
	adev->pstate = pstate;

	/*
	 * Update the hive pstate only if all devices of the hive
	 * are in the same pstate.
	 */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		if (tmp_adev->pstate != adev->pstate) {
			update_hive_pstate = false;
			break;
		}
	}
	if (update_hive_pstate || is_high_pstate)
		hive->pstate = pstate;

out:
	mutex_unlock(&hive->hive_lock);

	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret = -EINVAL;

	/* Each PSP needs to be given the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}


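/*
 * Return the number of xGMI hops between two devices, as recorded in
 * this device's PSP topology info, or -EINVAL if the peer is not part
 * of the topology.
 */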
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0 ; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops;
	return -EINVAL;
}

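/*
 * Register a device with its hive during init: query the hive and node
 * IDs from the PSP (or fall back to fixed placeholder IDs when no PSP
 * block is present), add the device to the hive's list, push the
 * updated topology to every member, and create the sysfs entries.
 */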
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
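		/*
		 * No PSP block: fall back to fixed placeholder IDs,
		 * presumably for emulation or bring-up configurations.
		 */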
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, cannot match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}

	/* Set default device pstate */
	adev->pstate = -1;

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	hive->number_devices = count;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update node list for other device in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* To do : continue with some node failed or disable the whole hive */
				goto exit;
			}
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret)
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	else
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);

	return ret;
}

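/*
 * Unregister a device from its hive.  The last device out tears down
 * the hive's sysfs entry and its locks.
 */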
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return;

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return;

	/* The last device out tears the whole hive down */
	if (!(--hive->number_devices)) {
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_unlock(&hive->hive_lock);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	} else {
		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
		mutex_unlock(&hive->hive_lock);
	}
}

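/*
 * Register the xGMI/WAFL block with the RAS framework, allocating the
 * common RAS interface structure on first use.
 */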
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
		.debugfs_name = "xgmi_wafl_err_inject",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}