// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
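	/*
	 * Return the CB's backing storage to where it came from: the
	 * internal CB gen_pool or the coherent DMA allocator
	 */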
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				(void *) (uintptr_t) cb->kernel_address,
				cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	cb_do_release(hdev, cb);
}

static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE_SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

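		/*
		 * Kernel-context CBs that fit the preallocated pool size are
		 * recycled from hdev->cb_pool rather than freshly allocated,
		 * which keeps the latency-sensitive submission path cheap.
		 */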
		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx_id = ctx_id;

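	/* mgr->cb_lock is held across idr_alloc(), hence GFP_ATOMIC */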
	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto release_cb;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The idr handle is 32-bit, so we can safely OR it with a mask whose
	 * bits all lie above bit 31
	 */
	*handle = cb->id | HL_MMAP_CB_MASK;
	*handle <<= PAGE_SHIFT;
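	/*
	 * Example (a sketch, assuming PAGE_SHIFT == 12): an idr id of 5
	 * yields *handle = (5 | HL_MMAP_CB_MASK) << 12. The decode path
	 * shifts right by PAGE_SHIFT and truncates to u32, dropping the
	 * mask bits and recovering the id.
	 */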

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to
	 * the value the idr module originally handed out
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

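	/*
	 * Typical flow (a sketch): the user issues HL_CB_OP_CREATE to get a
	 * handle, mmap()s the CB with that handle as the offset, and issues
	 * HL_CB_OP_DESTROY when done with it.
	 */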
	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr,
					args->in.cb_size, &handle,
					hpriv->ctx->asid, false);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

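	/*
	 * munmap() of part of the mapping splits the VMA and invokes .close
	 * on the removed piece, so keep a running count of what is still
	 * mapped and only release the CB once it drops to zero.
	 */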
	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	phys_addr_t address;
	u32 handle, user_cb_size;
	int rc;

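	/*
	 * The mmap offset is the CB handle, so vm_pgoff (offset >> PAGE_SHIFT)
	 * carries the idr id; truncating to u32 here discards the
	 * HL_MMAP_CB_MASK bits that live above bit 31.
	 */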
	handle = vma->vm_pgoff;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validate that the mapping covers the whole page-aligned CB */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	/* Calculate address for CB */
	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					address, cb->size);

	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx_id);
	}

	idr_destroy(&mgr->cb_handles);
}

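/*
 * hl_cb_kernel_create() - allocate a CB for the kernel driver's own use.
 *
 * Creates the CB under the kernel ASID and returns it with a reference
 * held (taken by hl_cb_get()), or NULL on failure.
 */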
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
			HL_KERNEL_ASID_ID, internal_cb);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}