/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

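/*
 * Accelerator flavors handled by this driver: DSA (the Data Streaming
 * Accelerator data mover) and IAX (the In-memory Analytics Accelerator).
 */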
enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

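/*
 * Per MSI-X vector context. Descriptors expecting an interrupt
 * completion are pushed onto pending_llist at submission time; the
 * interrupt thread moves any still-outstanding ones onto work_list.
 */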
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * completed descriptors and the irq thread processing an
	 * error descriptor.
	 */
	spinlock_t list_lock;
};

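/*
 * A group ties a set of engines to a set of work queues and holds the
 * shared bandwidth-token and traffic-class (tc_a/tc_b) configuration
 * programmed through grpcfg.
 */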
struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

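/*
 * Character-device state for a work queue exposed to user space
 * (IDXD_WQT_USER); err_queue lets waiters be woken when the wq
 * reports an error.
 */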
struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

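/*
 * Software state for one work queue: a shadow of the device WQCFG,
 * preallocated hardware descriptors and completion records, and a
 * sbitmap_queue that bounds in-flight descriptor allocation to
 * num_descs.
 */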
struct idxd_wq {
	void __iomem *portal;
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct dma_chan dma_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

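/*
 * Device state machine: DISABLED -> CONF_READY (reset and ready to be
 * configured) -> ENABLED; HALTED is entered on an unrecoverable
 * device error.
 */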
enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

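/*
 * Per PCI-function state: shadow capability registers in hw, the
 * offsets of the MSIX permission/WQCFG/GRPCFG/perfmon register blocks,
 * and the groups/wqs/engines arrays sized from the capabilities.
 */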
struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;
	int compl_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};
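
/*
 * Lifecycle sketch: idxd_alloc_desc() claims an id from the wq's
 * sbitmap_queue, idxd_submit_desc() writes the hardware descriptor to
 * the wq portal, and the descriptor is returned through
 * idxd_free_desc() once completed or aborted.
 */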

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}
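
/*
 * Shared wqs depend on the CPU supporting ENQCMD, since shared
 * submissions must carry a PASID for the device to resolve addresses;
 * dedicated wqs are written with MOVDIR64B instead.
 */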

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
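
/*
 * Each wq owns a four-page span in the portal BAR: the unlimited
 * portal at page 0 of the span and the limited portal at page 1.
 * Worked example with 4K pages: wq 2's limited portal is at
 * (2 * 4 << 12) + 0x1000 = 0x9000.
 */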

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
		idxd->type = IDXD_TYPE_IAX;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
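
/*
 * client_count is a plain integer, not a kref: callers are expected
 * to hold wq->wq_lock around idxd_wq_get()/idxd_wq_put() and any
 * idxd_wq_refcount() check.
 */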

const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
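
/*
 * Illustrative submission flow (error handling elided):
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	... fill desc->hw with the operation ...
 *	if (idxd_submit_desc(wq, desc))
 *		idxd_free_desc(wq, desc);
 *
 * IDXD_OP_BLOCK may wait for a free descriptor slot, while
 * IDXD_OP_NONBLOCK fails immediately when the wq is full.
 */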

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif