/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

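/*
 * kfd2kgd is the function table amdkfd uses to call back into amdgpu;
 * kgd2kfd is the table amdgpu uses to call into the amdkfd driver.
 */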
const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

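/*
 * Resolve the kgd2kfd interface when the amdgpu module loads.  If amdkfd
 * is built as a module, grab a reference to its kgd2kfd_init symbol; if
 * it is built in, call kgd2kfd_init() directly.  Returns -ENOENT when
 * KFD support is not available.
 */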
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	ret = -ENOENT;
#endif

	return ret;
}

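/*
 * Select the ASIC-specific kfd2kgd function table.  Only Kaveri (GFX7,
 * when CIK support is built in) and Carrizo (GFX8) are handled here; all
 * other ASICs report failure.
 */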
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		return false;
	}

	return true;
}

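/* Undo amdgpu_amdkfd_init(): let KFD exit and release the init symbol. */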
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

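/* Create the per-device KFD instance, handing it our kfd2kgd table. */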
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	if (kgd2kfd)
		adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
					adev->pdev, kfd2kgd);
}

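/*
 * Describe the compute resources KFD may use on this GPU: the upper
 * eight VMIDs, the MEC1 queues amdgpu does not use (with the KIQ masked
 * out), and the KFD doorbell aperture.
 */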
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

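/* Detach KFD from this GPU before amdgpu tears the device down. */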
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

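/* Forward an interrupt ring entry to KFD. */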
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

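/* Let KFD quiesce before the GPU is suspended. */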
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

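/* Let KFD restart its work once the GPU is back up. */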
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

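/*
 * kfd2kgd callback: allocate a pinned, kernel-mapped buffer object in GTT
 * for KFD and return both its GPU address and a CPU pointer.
 */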
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	kfree(*mem);

	return r;
}

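/* kfd2kgd callback: unmap, unpin and free a buffer from alloc_gtt_mem(). */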
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

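/* kfd2kgd callback: report the size of VRAM on this GPU. */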
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return adev->mc.real_vram_size;
}

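/* kfd2kgd callback: read the free-running GPU clock counter, if available. */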
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

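/* kfd2kgd callback: maximum shader engine clock (sclk) in MHz. */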
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* The sclk is in quanta of 10 kHz */
	return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}