/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
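/* Note: whether a queue uses 32-byte or 64-byte commands is fixed at init
 * time from the chip config (conf->instr_type below); these helpers only
 * reflect that choice, e.g.:
 *
 *	if (IQ_INSTR_MODE_64B(oct, iq_no))
 *		... build a 64-byte command (struct octeon_instr_64B) ...
 *
 * (Illustrative sketch; the command-struct name is assumed from octeon_iq.h,
 * not defined in this file.)
 */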

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return 1;

	return 0;
}

int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}
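
/* Usage note (sketch): callers treat a nonzero return as "instructions
 * still pending after ~1000 retries", so a teardown path might do
 *
 *	if (lio_wait_for_instr_fetch(oct))
 *		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 *
 * before disabling the queues. The caller shown here is illustrative.
 */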

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}
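
/* For reference, a minimal model of the ring arithmetic used above,
 * assuming incr_index() (from octeon_iq.h) wraps an index around max:
 *
 *	static inline u32 incr_index(u32 index, u32 count, u32 max)
 *	{
 *		if ((index + count) >= max)
 *			index = index + count - max;
 *		else
 *			index += count;
 *		return index;
 *	}
 *
 * This is a sketch of the helper's expected behavior, not its definition.
 */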

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}
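
/* Registration is expected to happen once per device at init time; a
 * hedged sketch of a caller (the handler name is illustrative):
 *
 *	static void free_netbuf(void *buf)
 *	{
 *		... unmap and free the tx buffer ...
 *	}
 *
 *	octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET, free_netbuf);
 */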

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * Add sc to the ordered soft command
				 * response list; lio_process_ordered_list()
				 * will process it.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					[OCTEON_ORDERED_SC_LIST].
					pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					[OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}
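
/* The return value is the number of request-list slots walked; the caller
 * (octeon_flush_iq() below) subtracts it from iq->instr_pending and uses it
 * for napi budget accounting.
 */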

/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;
	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* return immediately, if no work pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing  */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);

	lio_enable_irq(NULL, iq);
}

/* Called from a delayed workqueue at regular intervals to check the
 * instruction queue for commands to be posted and for commands that were
 * fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w           = 1;
		pki_ih3->raw         = 1;
		pki_ih3->utag        = 1;
		pki_ih3->uqpg        =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt         = 1;
		pki_ih3->tag     = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg         =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm          = 0x7;
		pki_ih3->sl          = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh            = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode    = opcode;
		irh->subcode   = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp       = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen      = sc->rdatasize;

			irh->rflag =  1;
			/*PKI IH3*/
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz    = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag =  0;
			/*PKI IH3*/
			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz    = LIO_PCICMD_O3;
		}

	} else {
		ih2          = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag     = LIO_CONTROL;
		ih2->raw     = 1;
		ih2->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh            = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode    = opcode;
		irh->subcode   = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp       = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen      = sc->rdatasize;

			irh->rflag =  1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz   = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag =  0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz   = LIO_PCICMD_O2;
		}
	}
}
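
/* A typical soft-command sequence, as a sketch built only from functions in
 * this file (sizes and opcode values are placeholders):
 *
 *	sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
 *	if (!sc)
 *		return -ENOMEM;
 *	octeon_prepare_soft_command(oct, sc, opcode, subcode, irh_ossp,
 *				    ossp0, ossp1);
 *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *		octeon_free_soft_command(oct, sc);
 */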

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 =  (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}
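
/* Resulting buffer layout (derived from the offset math above):
 *
 *	[ struct octeon_soft_command | ctx ... | pad to 128 | data ... |
 *	  pad to 128 | rdata ... (status word in the last 8 bytes) ]
 *
 * All regions share the single DMA allocation drawn from the sc buffer pool.
 */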

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}