// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);

/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

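	/* Keep descriptor memory local to the device's NUMA node */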
	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	wq->num_descs = wq->size;
	num_descs = wq->size;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

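	/*
	 * Completion records are written by the device, so they live in
	 * a single DMA-coherent buffer; each descriptor gets one slot.
	 */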
	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

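	/* The sbitmap queue hands out free descriptor indices on submit */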
	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma  = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
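		/* Hook each descriptor into the dmaengine framework */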
		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

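	/* One-hot WQ bit in the low word, 16-WQ bank index in the high word */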
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

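	/*
	 * Each WQ has an MMIO submission portal in the WQ BAR; map the
	 * limited portal, which is where descriptors are written.
	 */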
	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}

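/* Reset the WQ's software state and zero its WQCFG registers */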
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(0, idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_ENABLED);
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands are done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

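	/* Only one device command may be outstanding at a time */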
	spin_lock_irqsave(&idxd->dev_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->dev_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After the command is submitted, release the lock and sleep
	 * until the command completes via interrupt.
	 */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	wait_for_completion(&done);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (status) {
		*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
		idxd->cmd_status = *status & GENMASK(7, 0);
	}

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	unsigned long flags;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;

	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_CONF_READY;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
}

/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

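	/*
	 * Each group's config occupies a 64-byte stride: a 256-bit WQ
	 * bitmap, the engine bitmap at offset 32 and flags at offset 40.
	 */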
	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(wq->wqcfg, 0, idxd->wqcfg_size);

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* byte 8-11 */
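	/* priv marks kernel-owned WQs; mode 1 is dedicated (not shared) */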
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg->mode = 1;
	wq->wqcfg->priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 are the defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
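		/* With no per-group quota, allow up to the device maximum */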
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

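		/* Set this WQ's bit in the group's 256-bit WQ bitmap */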
		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}