/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>

#define MAX_CHAN	4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
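
/*
 * Illustrative note: INFO(2, 6, 4095, 0x200020), for example, builds an
 * anonymous struct intel_mid_dma_probe_info describing a controller with
 * two channels based at channel 6, a max block size of 4095 data items
 * and a PIMR mask of 0x200020, and stores its address in the PCI id
 * table's driver_data field; the probe routine later recovers it with
 * info = (void *)id->driver_data (see intel_mid_dma_ids[] below).
 */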

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
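
/*
 * Worked example (illustrative): with base = 6 and *status = 0xC0, the
 * first call matches bit 6, returns index 0 and leaves *status = 0x80;
 * the next call returns index 1 and leaves *status = 0, which ends the
 * caller's while (status) loop in dma_tasklet().
 */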

/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items;
 * return the data items, or 0xFFFF if it exceeds the max block length
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
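
/*
 * Worked example (illustrative): a 2048 byte transfer with a tx_width of
 * DMA_SLAVE_BUSWIDTH_4_BYTES gives block_ts = 2048 / 4 = 512 data items,
 * which fits within a 4095-item block; a result larger than block_size
 * would instead be reported as 0xFFFF.
 */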

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr -	mask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * Masks the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr -	unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt -	enable the peripheral interrupt
 * @midc: dma channel for which the interrupt is to be enabled
 *
 * Enables the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/*enable channel interrupts*/
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt -	disable the peripheral interrupt
 * @midc: dma channel for which the interrupt is to be disabled
 *
 * Disables the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/*Check LPE PISR, make sure fwd is disabled*/
	dmac1_mask_periphral_intr(midc);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get		-	get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put		-	put a descriptor
 * @midc: dma channel that owns the descriptor
 * @desc: descriptor to put
 *
 * Return a descriptor obtained from midc_desc_get() back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart		-		begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/*  channel is idle */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/*write registers and en*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete	-	process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
	       struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli	*llitem;
	void *param_txd = NULL;

	midc->completed = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/*clear the DONE bit of completed LLI in memory*/
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors -		check the descriptors in channel
 *					mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg -		Helper function to convert
 *				SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist  *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
		/*Check for circular list, otherwise terminate LLI to ZERO*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn ==  DMA_TO_DEVICE) {
			lli_bloc_desc->sar  = sg_phy_addr;
			lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
		} else if (desc->dirn ==  DMA_FROM_DEVICE) {
			lli_bloc_desc->sar  = mids->dma_slave.src_addr;
			lli_bloc_desc->dar  = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
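
/*
 * Illustrative layout for a three-entry SG list: midc_lli_fill_sg() writes
 * three struct intel_mid_dma_lli blocks back to back starting at
 * desc->lli_phys, with LLI[0].llp pointing at LLI[1] and LLI[1].llp at
 * LLI[2]; LLI[2].llp is either 0 (plain list, with llp_src_en/llp_dst_en
 * cleared) or desc->lli_phys again when DMA_PREP_CIRCULAR_LIST is set.
 */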
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;


	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where pending transactions need to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan where the status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int				ret;

	last_complete = midc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);

		last_complete = midc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
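
/*
 * Sketch of the expected client usage (illustrative only; the exact field
 * names live in intel_mid_dma.h): the caller embeds the dma_slave_config
 * inside a struct intel_mid_dma_slave so that to_intel_mid_dma_slave()
 * above can recover the wrapper, e.g.
 *
 *	struct intel_mid_dma_slave mid_cfg = { 0 };
 *
 *	mid_cfg.dma_slave.direction      = DMA_TO_DEVICE;
 *	mid_cfg.dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	mid_cfg.dma_slave.dst_maxburst   = 8;
 *	mid_cfg.hs_mode = LNW_DMA_HW_HS;
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				(unsigned long)&mid_cfg.dma_slave);
 */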
/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
			enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}


/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destn address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only.
 * The peripheral txn details should be filled in the slave structure properly.
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done  = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		 1 Byte			0b000
	 *		 2 Bytes		0b001
	 *		 4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
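	/*
	 * Illustrative check of the mapping above: the dma_slave_buswidth
	 * values are 1, 2 and 4 bytes, so integer division by two yields
	 * 0b000, 0b001 and 0b010 respectively, matching the controller's
	 * transfer width encoding.
	 */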

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares an LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
						mids->dma_slave.dst_addr,
						mids->dma_slave.src_addr,
						sgl->length,
						flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return  NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
				midc->dma->pdev,
				(sizeof(struct intel_mid_dma_lli)*sg_len),
				32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}

/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (true == midc->busy) {
		/*trying to free ch in use!!!!!*/
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}

/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Returns the number of descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc;
	dma_addr_t		phys;
	int	i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT:  channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	midc->completed = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error -	Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device *mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/*clear this interrupt first*/
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting takslet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in takslet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in takslet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt -	DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore it
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/*need to disable intr*/
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller and its channels, register with the
 * DMA engine and set up the ISR.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
			return -ENOMEM;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie =  1;
		midch->chan.chan_id = i;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;
	dma->common.chancnt = dma->max_chan;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;

}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister the DMA controller, clear all structures and free the interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and chan initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size  = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and chan cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
* dma_suspend - PCI suspend function
*
* @pci: PCI device structure
* @state: PM message
*
* This function is called by OS when a power event occurs
*/
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
	int i;
	struct middma_device *device = pci_get_drvdata(pci);
	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
* dma_resume - PCI resume function
*
* @pci:	PCI device structure
*
* This function is called by OS when a power event occurs
*/
int dma_resume(struct pci_dev *pci)
{
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return pm_schedule_suspend(dev, 0);
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	__devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.suspend = dma_suspend,
	.resume = dma_resume,
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);