// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * ictx is an array indexed by accelerator type; enum idxd_type is used
 * as the index.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
	{ .name = "iax" }
};

struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int pasid;
	unsigned int flags;
	struct iommu_sva *sva;
};

static void idxd_cdev_dev_release(struct device *dev)
{
	struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
	struct idxd_cdev_context *cdev_ctx;
	struct idxd_wq *wq = idxd_cdev->wq;

	cdev_ctx = &ictx[wq->idxd->data->type];
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	kfree(idxd_cdev);
}

static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

	return idxd_cdev->wq;
}

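/*
 * Open path for the user char device. A dedicated wq is exclusive to a
 * single opener, while a shared wq can be opened many times. When the
 * device has PASID support enabled, the opener's address space is bound
 * via SVA so the hardware can operate on user virtual addresses.
 */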
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;
	int rc = 0;
	struct iommu_sva *sva;
	unsigned int pasid;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->wq_lock);

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

	ctx->wq = wq;
	filp->private_data = ctx;

	if (device_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm, NULL);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);
			dev_err(dev, "pasid allocation failed: %d\n", rc);
			goto failed;
		}

		pasid = iommu_sva_get_pasid(sva);
		if (pasid == IOMMU_PASID_INVALID) {
			iommu_sva_unbind_device(sva);
			rc = -EINVAL;
			goto failed;
		}

		ctx->sva = sva;
		ctx->pasid = pasid;

		if (wq_dedicated(wq)) {
			rc = idxd_wq_set_pasid(wq, pasid);
			if (rc < 0) {
				iommu_sva_unbind_device(sva);
				dev_err(dev, "wq set pasid failed: %d\n", rc);
				goto failed;
			}
		}
	}

	idxd_wq_get(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;

 failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
}

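/*
 * Release path: descriptors still in flight are drained before the
 * PASID is unbound, so that no work tagged with this context's PASID
 * completes after the address space is gone.
 */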
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	/* Wait for in-flight operations to complete. */
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
		if (device_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
				dev_err(dev, "wq disable pasid failed.\n");
		} else {
			idxd_wq_drain(wq);
		}
	}

	if (ctx->sva)
		iommu_sva_unbind_device(ctx->sva);
	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;
}

static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}

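/*
 * Map a single page of the wq's limited portal into userspace for
 * direct descriptor submission (e.g. ENQCMD to a shared wq); check_vma()
 * above rejects any mapping larger than one page.
 */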
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}

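/*
 * Poll reports EPOLLIN when the device has latched a software error for
 * userspace to read out; wq->err_queue is woken when such an error is
 * recorded.
 */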
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	__poll_t out = 0;

	poll_wait(filp, &wq->err_queue, wait);
	spin_lock(&idxd->dev_lock);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&idxd->dev_lock);

	return out;
}

static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};

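/*
 * Illustrative userspace usage of this char device (a sketch, assuming
 * a wq named wq0.0 on a DSA device; names and flags are hypothetical):
 *
 *	int fd = open("/dev/dsa/wq0.0", O_RDWR);
 *	void *portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
 *	// build a descriptor and submit it to the portal (e.g. ENQCMD
 *	// on a shared wq), then poll(fd, ...) to watch for device errors
 */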
int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->data->type].devt);
}

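/*
 * Create and register the char device for a wq. The device lives on the
 * dsa bus with the wq's configuration device as parent and is named
 * <name_prefix>/wq<device_id>.<wq_id>, e.g. dsa/wq0.0.
 */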
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
	struct device *dev;
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;

	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
		return -ENOMEM;

	idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = cdev_dev(idxd_cdev);
	cdev_ctx = &ictx[wq->idxd->data->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(idxd_cdev);
		return minor;
	}
	idxd_cdev->minor = minor;

	device_initialize(dev);
	dev->parent = wq_confdev(wq);
	dev->bus = &dsa_bus_type;
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

	rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
	if (rc < 0)
		goto err;

	wq->idxd_cdev = idxd_cdev;
	cdev_init(cdev, &idxd_cdev_fops);
	rc = cdev_device_add(cdev, dev);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		goto err;
	}

	return 0;

 err:
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev;

	idxd_cdev = wq->idxd_cdev;
	wq->idxd_cdev = NULL;
	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
	put_device(cdev_dev(idxd_cdev));
}

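/*
 * Probe/remove for the "user" wq device driver: binding a wq enables it
 * and exposes its char device, unbinding tears both down again.
 */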
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_USER;
	rc = drv_enable_wq(wq);
	if (rc < 0)
		goto err;

	rc = idxd_wq_add_cdev(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
		goto err_cdev;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_cdev:
	drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_del_cdev(wq);
	drv_disable_wq(wq);
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_user_drv = {
	.probe = idxd_user_drv_probe,
	.remove = idxd_user_drv_remove,
	.name = "user",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_user_drv);

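/*
 * Reserve a char device region and a minor ida per accelerator type at
 * module init; idxd_cdev_remove() releases them on exit.
 */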
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			goto err_free_chrdev_region;
	}

	return 0;

err_free_chrdev_region:
	for (i--; i >= 0; i--)
		unregister_chrdev_region(ictx[i].devt, MINORMASK);

	return rc;
}

void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}