/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_MAX
};

#define IDXD_NAME_SIZE		128

struct idxd_device_driver {
	struct device_driver drv;
};

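/*
 * Per-vector interrupt context. Rough flow (implemented in irq.c, not in
 * this header): submitters queue descriptors on the lockless
 * pending_llist; the interrupt thread completes finished descriptors and
 * moves the still-pending ones onto work_list for later processing.
 */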
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	struct llist_head pending_llist;
	struct list_head work_list;
};

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,	/* WQ owned by the kernel (dmaengine client) */
	IDXD_WQT_USER,		/* WQ exposed to userspace via a char device */
};

struct idxd_cdev {
	struct cdev cdev;
	struct device *dev;
	int minor;
	struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE   1024
#define WQ_TYPE_SIZE   10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
};

struct idxd_wq {
	void __iomem *dportal;	/* WQ portal mapped by idxd_wq_map_portal() */
	struct device conf_dev;
	struct idxd_cdev idxd_cdev;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	struct dsa_completion_record *compls;
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;	/* slot allocator for descs */
	struct dma_chan dma_chan;
	char name[WQ_NAME_SIZE + 1];
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
};

struct idxd_device {
	enum idxd_type type;
	struct device conf_dev;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	struct completion *cmd_done;
	struct idxd_group *groups;
	struct idxd_wq *wqs;
	struct idxd_engine *engines;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	struct msix_entry *msix_entries;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct dma_device dma_dev;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* IDXD software descriptor */
struct idxd_desc {
	struct dsa_hw_desc *hw;
	dma_addr_t desc_dma;
	struct dsa_completion_record *completion;
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;	/* entry on irq_entry->pending_llist */
	struct list_head list;		/* entry on irq_entry->work_list */
	int id;
	int cpu;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
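
/*
 * Worked example (illustrative arithmetic only): with 4KB pages, each WQ
 * owns four contiguous portal pages in the WQ portal BAR, page 0 being
 * the unlimited portal and page 1 the limited one. WQ 1's limited portal
 * therefore starts at ((1 * 4) << 12) + 0x1000 = 0x5000.
 */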

static inline void idxd_set_type(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
		idxd->type = IDXD_TYPE_DSA;
	else
		idxd->type = IDXD_TYPE_UNKNOWN;
}

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}
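
/*
 * Usage sketch (illustrative): client_count is a plain int, so callers
 * are expected to serialize get/put themselves, e.g. under the WQ mutex:
 *
 *	mutex_lock(&wq->wq_lock);
 *	idxd_wq_get(wq);
 *	mutex_unlock(&wq->wq_lock);
 */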

const char *idxd_get_dev_name(struct idxd_device *idxd);
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_setup_sysfs(struct idxd_device *idxd);
void idxd_cleanup_sysfs(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);

/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
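
/*
 * Submission sketch (illustrative; assumes idxd_alloc_desc() returns an
 * ERR_PTR-encoded error on failure):
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	... fill in desc->hw ...
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc)
 *		idxd_free_desc(wq, desc);
 */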

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
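
/*
 * Note: each user-type WQ (IDXD_WQT_USER) is exposed as a character
 * device node; idxd_wq_add_cdev()/idxd_wq_del_cdev() manage the per-WQ
 * node, with the major number set up by idxd_cdev_register() and the
 * per-WQ minor kept in wq->idxd_cdev.minor.
 */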

#endif