/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
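
/*
 * Global registry of XGMI hives.  xgmi_mutex serializes hive lookup and
 * creation as well as adding and removing devices from a hive.
 */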
static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_HIVE			8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count;

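/*
 * Return an opaque per-hive pointer (currently the head of the hive's device
 * list) that callers can use to identify the hive; no locking is done here.
 */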
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
	return &hive->device_list;
}

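/*
 * Look up the hive this device belongs to, creating a new entry in the global
 * table on first use.  Returns NULL if the device reports no hive_id or the
 * table is already full.  Callers are expected to hold xgmi_mutex.
 */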
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_hive_info *tmp;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;
	for (i = 0 ; i < hive_count; ++i) {
		tmp = &xgmi_hives[i];
		if (tmp->hive_id == adev->gmc.xgmi.hive_id)
			return tmp;
	}
	if (i >= AMDGPU_MAX_XGMI_HIVE)
		return NULL;

	/* initialize a new hive if one does not already exist */
	tmp = &xgmi_hives[hive_count++];
	tmp->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&tmp->device_list);
	mutex_init(&tmp->hive_lock);

	return tmp;
}

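/*
 * Program the hive's current topology_info into the given device's PSP.
 */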
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret = -EINVAL;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 hive->number_devices,
					 &hive->topology_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);
	else
		dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id,
			 adev->gmc.xgmi.hive_id);

	return ret;
}

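/*
 * Register the device with its XGMI hive: query the node and hive IDs from
 * the PSP, add the device to the hive's device list, then refresh the
 * topology from and program it back to every device in the hive.
 */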
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *hive_topology;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;
	int count = 0, ret = -EINVAL;

	if (!adev->gmc.xgmi.supported)
		return 0;

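	/* Ask the PSP for this device's XGMI node and hive identifiers. */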
	ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get node id\n");
		return ret;
	}

	ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
	if (ret) {
		dev_err(adev->dev,
			"XGMI: Failed to get hive id\n");
		return ret;
	}

	mutex_lock(&xgmi_mutex);
	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		goto exit;

	hive_topology = &hive->topology_info;

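	/* Add this device to the hive and rebuild the list of node IDs. */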
	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		hive_topology->nodes[count++].node_id = entry->node_id;
	hive->number_devices = count;

	/* Each psp needs to get the latest topology */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
				tmp_adev->gmc.xgmi.node_id,
				tmp_adev->gmc.xgmi.hive_id, ret);
			/* TODO: continue despite a failed node, or disable the whole hive */
			break;
		}
	}

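	/* Push the refreshed topology back to every device in the hive. */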
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (ret)
			break;
	}

exit:
	mutex_unlock(&xgmi_mutex);
	return ret;
}
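
/*
 * Remove the device's accounting from its hive; once the last device is
 * gone, the per-hive lock is destroyed.
 */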
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;

	if (!adev->gmc.xgmi.supported)
		return;

	mutex_lock(&xgmi_mutex);

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		goto exit;

	/* Tear down the per-hive lock once the last device has left the hive. */
	if (!(--hive->number_devices))
		mutex_destroy(&hive->hive_lock);

exit:
	mutex_unlock(&xgmi_mutex);
}