/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;	/* lock-free, filled by the submission path */
	struct list_head work_list;		/* in-flight work tracked by the IRQ thread */
};

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;	/* max tokens the group may consume */
	u8 tokens_reserved;	/* tokens reserved for this group */
	int tc_a;		/* traffic class A */
	int tc_b;		/* traffic class B */
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_wq {
	void __iomem *portal;
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	struct dsa_completion_record *compls;
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct dma_chan dma_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
	struct dsa_hw_desc *hw;
	dma_addr_t desc_dma;
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}
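
/*
 * Shared WQs are submitted to with ENQCMD/ENQCMDS, which carry a PASID, so
 * they are only usable when the CPU supports ENQCMD and PASID is enabled
 * (hence the two checks above).
 */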

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
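
/*
 * Illustrative example (assuming 4 KiB pages): each WQ owns four portal
 * pages, so WQ 2's limited portal sits at ((2 * 4) << PAGE_SHIFT) + 0x1000
 * = 0x9000 from the device's WQ portal base.
 */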

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
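
/*
 * Illustrative usage (sketch only): open/release paths are expected to adjust
 * the client count under the WQ mutex, e.g.:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 */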

const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

#endif