/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	int vector;
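	/*
	 * Descriptors expecting an interrupt are pushed onto pending_llist
	 * (lock free) at submission time; the irq thread moves any that have
	 * not yet completed onto work_list for later processing.
	 */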
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock protecting the descriptor lists against concurrent access by
	 * the irq thread processing completed descriptors and the irq thread
	 * processing error descriptors.
	 */
	spinlock_t list_lock;
};

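/*
 * Engine/WQ grouping. The token fields control how the device's shared
 * read buffers are split among groups; tc_a/tc_b select the traffic
 * classes used by the group.
 */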
struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct device dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

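/*
 * Per work queue state. conf_dev is the sysfs configuration device for
 * the WQ and portal is the WQ's mapped submission portal in the device
 * BAR.
 */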
struct idxd_wq {
	void __iomem *portal;
	struct device conf_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

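/*
 * Per device-type (DSA vs. IAX) constants: sysfs name prefix, device_type,
 * and the completion record size/alignment.
 */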
struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

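/*
 * Per device state for one DSA or IAX instance. conf_dev is the sysfs
 * configuration device and reg_base maps the device's MMIO register BAR.
 */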
struct idxd_device {
	struct device conf_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
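	/* hardware descriptor, in DSA or IAX format depending on device type */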
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
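	/* completion record written back by the device */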
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct device *dev)
{
	return dev->type == &dsa_device_type;
}

static inline bool is_iax_dev(struct device *dev)
{
	return dev->type == &iax_device_type;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev) || is_iax_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev->type == &idxd_wq_device_type;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

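/*
 * Dedicated WQs are owned by a single client and written with MOVDIR64B;
 * shared WQs take ENQCMD/ENQCMDS submissions and therefore require both
 * CPU ENQCMD support and PASID (see device_swq_supported() below).
 */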
static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

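/*
 * WQ submission portals. Each WQ's portals are spaced four pages apart in
 * the portal BAR, with the limited portal one page (0x1000) above the
 * unlimited portal (see the offset helpers below).
 */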
enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

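/*
 * WQ client refcounting. client_count is a plain int, so callers are
 * expected to serialize these helpers (typically under wq->wq_lock).
 */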
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
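/*
 * Illustrative kernel-mode submission sketch using the helpers above (a
 * real caller also fills in the completion address and descriptor flags):
 *
 *	struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	desc->hw->opcode = DSA_OPCODE_NOOP;
 *	if (idxd_submit_desc(wq, desc))
 *		idxd_free_desc(wq, desc);
 */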

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif