/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"


static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

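/*
 * Table of hives discovered so far; lookups and insertions are
 * serialized by xgmi_mutex.
 */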
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;


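/*
 * Despite its name, this does not take any lock; it returns the address of
 * the hive's device list, which is unique per hive and can be used by
 * callers as an identifying token.
 */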
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

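/*
 * Look up the hive matching adev's hive_id, creating a new entry if none
 * exists and the table still has room. Returns NULL when the device has no
 * hive_id or the table is full. When lock is set, the hive's hive_lock is
 * held on return.
 */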
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	mutex_lock(&xgmi_mutex);

	for (i = 0 ; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
			if (lock)
				mutex_lock(&tmp->hive_lock);
			mutex_unlock(&xgmi_mutex);
			return tmp;
		}
	}
	if (i >= AMDGPU_MAX_XGMI_HIVE) {
		mutex_unlock(&xgmi_mutex);
		return NULL;
	}

	/* initialize a new hive if one does not exist yet */
	tmp = &xgmi_hives[hive_count++];
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	mutex_init(&tmp->hive_lock);
	mutex_init(&tmp->reset_lock);
	if (lock)
		mutex_lock(&tmp->hive_lock);

	mutex_unlock(&xgmi_mutex);

	return tmp;
}

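/*
 * Push the hive's cached topology_info to the given device's PSP so that
 * its firmware has the latest view of the hive.
 */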
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret = -EINVAL;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &hive->topology_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

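/*
 * Called during device init for XGMI-capable ASICs: query the node and hive
 * ids from the PSP, join (or create) the matching hive, rebuild the hive's
 * node list, and refresh the topology on every member device.
 */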
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *hive_topology;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = -EINVAL;

	if (!adev->gmc.xgmi.supported)
		return 0;

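	/* Query this device's XGMI node id and hive id from the PSP */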
	ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get node id\n");
		return ret;
	}

	ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get hive id\n");
		return ret;
	}

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}

	hive_topology = &hive->topology_info;

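	/* Add this device to the hive's list and collect all member node ids */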
	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		hive_topology->nodes[count++].node_id = entry->node_id;
	hive->number_devices = count;

	/* Each psp needs to get the latest topology */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
				tmp_adev->gmc.xgmi.node_id,
				tmp_adev->gmc.xgmi.hive_id, ret);
			/* TODO: either continue when a node fails or disable the whole hive */
			break;
		}
	}

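	/* Have each device program the collected topology through its PSP */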
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (ret)
			break;
	}

	dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
		 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);

	mutex_unlock(&hive->hive_lock);
exit:
	return ret;
}

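/*
 * Teardown counterpart of amdgpu_xgmi_add_device(): drop the device from
 * the hive's bookkeeping, destroying the hive's mutexes once the hive has
 * no devices left and releasing hive_lock otherwise.
 */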
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return;

	hive = amdgpu_get_xgmi_hive(adev, 1);
	if (!hive)
		return;

	if (!(hive->number_devices--)) {
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
	} else {
		mutex_unlock(&hive->hive_lock);
	}
}