// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

/* Per accelerator-type bookkeeping for the character device interface. */
struct idxd_cdev_context {
	const char *name;	/* device-node name prefix, e.g. "dsa" */
	dev_t devt;		/* base dev_t from alloc_chrdev_region() */
	struct ida minor_ida;	/* allocator for per-wq cdev minor numbers */
};

/*
 * ictx is an array based off of accelerator types. enum idxd_type
 * is used as index. Only "dsa" is populated here; other slots are
 * zero-initialized until additional accelerator types are added.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
};

/*
 * Per-open state. Allocated in idxd_cdev_open(), stored in
 * file->private_data, and freed in idxd_cdev_release().
 * (Fix: removed line-number artifact text that had been pasted
 * into the struct body and would not compile.)
 */
struct idxd_user_context {
	struct idxd_wq *wq;		/* work queue this open is bound to */
	struct task_struct *task;	/* NOTE(review): never assigned in this file -- confirm use elsewhere */
	unsigned int pasid;		/* PASID from iommu_sva_get_pasid() when SVA is enabled */
	unsigned int flags;
	struct iommu_sva *sva;		/* SVA bond, unbound on release; NULL when PASID disabled */
};

static void idxd_cdev_dev_release(struct device *dev)
{
43 44 45 46 47 48 49
	struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
	struct idxd_cdev_context *cdev_ctx;
	struct idxd_wq *wq = idxd_cdev->wq;

	cdev_ctx = &ictx[wq->idxd->type];
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	kfree(idxd_cdev);
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
}

/* Device type for the cdev's struct device; ->release frees the idxd_cdev. */
static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

/* Map an inode back to the idxd_cdev that embeds its struct cdev. */
static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	return container_of(inode->i_cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
66 67 68
	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

	return idxd_cdev->wq;
69 70 71 72 73 74 75 76
}

static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;
77
	int rc = 0;
78 79
	struct iommu_sva *sva;
	unsigned int pasid;
80 81 82 83 84

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

85
	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
86 87 88 89 90

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

91 92 93 94 95 96 97
	mutex_lock(&wq->wq_lock);

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

98 99
	ctx->wq = wq;
	filp->private_data = ctx;
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127

	if (device_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm, NULL);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);
			dev_err(dev, "pasid allocation failed: %d\n", rc);
			goto failed;
		}

		pasid = iommu_sva_get_pasid(sva);
		if (pasid == IOMMU_PASID_INVALID) {
			iommu_sva_unbind_device(sva);
			goto failed;
		}

		ctx->sva = sva;
		ctx->pasid = pasid;

		if (wq_dedicated(wq)) {
			rc = idxd_wq_set_pasid(wq, pasid);
			if (rc < 0) {
				iommu_sva_unbind_device(sva);
				dev_err(dev, "wq set pasid failed: %d\n", rc);
				goto failed;
			}
		}
	}

128
	idxd_wq_get(wq);
129
	mutex_unlock(&wq->wq_lock);
130
	return 0;
131 132 133 134 135

 failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
136 137 138 139 140 141 142 143
}

/*
 * release() handler: tears down the per-open context created in
 * idxd_cdev_open(). Drains outstanding descriptors (per-PASID drain for
 * shared wqs, wq disable/drain for dedicated), unbinds the SVA handle
 * if one was taken, frees the context and drops the wq reference.
 * Always returns 0.
 * (Fix: removed line-number artifact text pasted into the body.)
 */
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	/* Wait for in-flight operations to complete. */
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
		if (device_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
				dev_err(dev, "wq disable pasid failed.\n");
		} else {
			idxd_wq_drain(wq);
		}
	}

	if (ctx->sva)
		iommu_sva_unbind_device(ctx->sva);
	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;
}

/*
 * Validate that an mmap request fits within the single-page wq portal.
 * Returns 0 when acceptable; logs (ratelimited) and returns -EINVAL
 * when the requested range exceeds one page.
 */
static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;
	unsigned long len = vma->vm_end - vma->vm_start;

	if (len <= PAGE_SIZE)
		return 0;

	dev_info_ratelimited(dev,
			     "%s: %s: mapping too large: %lu\n",
			     current->comm, func, len);
	return -EINVAL;
}

static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
200 201
	if (rc < 0)
		return rc;
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}

/*
 * poll() handler: reports the device readable when a software error
 * record is pending (idxd->sw_err.valid, guarded by dev_lock).
 * (Fix: removed line-number artifact text pasted into the body.)
 */
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	unsigned long flags;
	__poll_t out = 0;

	poll_wait(filp, &wq->err_queue, wait);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return out;
}

/* File operations backing each idxd wq character device node. */
static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};

int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->type].devt);
}

244
int idxd_wq_add_cdev(struct idxd_wq *wq)
245 246
{
	struct idxd_device *idxd = wq->idxd;
247 248
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
249
	struct device *dev;
250 251
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;
252

253 254
	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
255 256
		return -ENOMEM;

257 258 259
	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = &idxd_cdev->dev;
260 261 262
	cdev_ctx = &ictx[wq->idxd->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
263 264
		kfree(idxd_cdev);
		return minor;
265 266 267
	}
	idxd_cdev->minor = minor;

268 269 270 271 272
	device_initialize(dev);
	dev->parent = &wq->conf_dev;
	dev->bus = idxd_get_bus_type(idxd);
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
273

274 275
	rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
			  idxd->id, wq->id);
276
	if (rc < 0)
277
		goto err;
278

279
	wq->idxd_cdev = idxd_cdev;
280
	cdev_init(cdev, &idxd_cdev_fops);
281
	rc = cdev_device_add(cdev, dev);
282 283
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
284
		goto err;
285 286 287
	}

	return 0;
288 289 290 291 292

 err:
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
293 294 295 296
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
297 298 299 300 301 302 303 304
	struct idxd_cdev *idxd_cdev;
	struct idxd_cdev_context *cdev_ctx;

	cdev_ctx = &ictx[wq->idxd->type];
	idxd_cdev = wq->idxd_cdev;
	wq->idxd_cdev = NULL;
	cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
	put_device(&idxd_cdev->dev);
305 306 307 308 309 310 311 312 313 314 315
}

/*
 * Module-init helper: allocate a chrdev region and init the minor IDA
 * for each accelerator type. On failure, unwinds the regions already
 * allocated (the freshly-init'ed IDAs are empty, so no ida_destroy()
 * is needed on this path) and returns the errno from
 * alloc_chrdev_region().
 * (Fix: removed line-number artifact text pasted into the body.)
 */
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			goto err_free_chrdev_region;
	}

	return 0;

err_free_chrdev_region:
	for (i--; i >= 0; i--)
		unregister_chrdev_region(ictx[i].devt, MINORMASK);

	return rc;
}

void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}