bam_dma.c

/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74.  The configuration of the channels is dependent
 * on the way they are hard wired to that specific peripheral.  The peripheral
 * device tree entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within the
 * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
 * are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
 * indicates the FIFO offset currently being processed, giving some visibility
 * into where the hardware is working.
 */
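
/*
 * A minimal sketch of that kick sequence (pseudocode only; the real logic,
 * including partial transfers and interrupt flags, lives in bam_start_dma()
 * below):
 *
 *	fifo[tail] = *desc;                            - write the descriptor
 *	tail = (tail + 1) % MAX_DESCRIPTORS;           - handle FIFO wrap
 *	writel(tail * sizeof(struct bam_desc_hw),
 *	       bam_addr(bdev, pipe, BAM_P_EVNT_REG));  - kick the transfer
 */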

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

struct bam_desc_hw {
	__le32 addr;		/* Buffer physical address */
	__le16 size;		/* Buffer size in bytes */
	__le16 flags;
};

#define BAM_DMA_AUTOSUSPEND_DELAY 100

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
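
/*
 * The flags field is stored little-endian, so flag bits are always composed
 * via cpu_to_le16().  For example, the final descriptor of a fenced
 * transaction that requests an end-of-transfer interrupt ends up carrying:
 *
 *	desc->flags |= cpu_to_le16(DESC_FLAG_EOT | DESC_FLAG_NWD);
 */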

struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;
	u32 xfer_len;

	/* transaction flags, EOT|EOB|NWD */
	u16 flags;

	struct bam_desc_hw *curr_desc;

	/* list node for the desc in the bam_chan list of descriptors */
	struct list_head desc_node;
	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[];
};

enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};

struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};

static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};

static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
};

static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL]		= { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL]		= { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x13820, 0x00, 0x1000, 0x00 },
};

/* BAM CTRL */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT		0
#define REVISION_MASK		0xFF
#define NUM_EES_SHIFT		8
#define NUM_EES_MASK		0xF
#define CE_BUFFER_SIZE		BIT(13)
#define AXI_ACTIVE		BIT(14)
#define USE_VMIDMT		BIT(15)
#define SECURED			BIT(16)
#define BAM_HAS_NO_BYPASS	BIT(17)
#define HIGH_FREQUENCY_BAM	BIT(18)
#define INACTIV_TMRS_EXST	BIT(19)
#define NUM_INACTIV_TMRS	BIT(20)
#define DESC_CACHE_DEPTH_SHIFT	21
#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN		BIT(23)
#define INACTIV_TMR_BASE_SHIFT	24
#define INACTIV_TMR_BASE_MASK	0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
#define PERIPH_NON_PIP_GRP_MASK		0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG		BIT(2)
#define BAM_FULL_PIPE		BIT(11)
#define BAM_NO_EXT_P_RST	BIT(12)
#define BAM_IBC_DISABLE		BIT(13)
#define BAM_SB_CLK_REQ		BIT(14)
#define BAM_PSM_CSW_REQ		BIT(15)
#define BAM_PSM_P_RES		BIT(16)
#define BAM_AU_P_RES		BIT(17)
#define BAM_SI_P_RES		BIT(18)
#define BAM_WB_P_RES		BIT(19)
#define BAM_WB_BLK_CSW		BIT(20)
#define BAM_WB_CSW_ACK_IDL	BIT(21)
#define BAM_WB_RETR_SVPNT	BIT(22)
#define BAM_WB_DSC_AVL_P_RST	BIT(23)
#define BAM_REG_P_EN		BIT(24)
#define BAM_PSM_P_HD_DATA	BIT(25)
#define BAM_AU_ACCUMED		BIT(26)
#define BAM_CMD_ENABLE		BIT(27)

#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN			BIT(1)
#define P_DIRECTION		BIT(3)
#define P_SYS_STRM		BIT(4)
#define P_SYS_MODE		BIT(5)
#define P_AUTO_EOB		BIT(6)
#define P_AUTO_EOB_SEL_SHIFT	7
#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT	9
#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD		BIT(11)
#define P_LOCK_GROUP_SHIFT	16
#define P_LOCK_GROUP_MASK	0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD		0xffff
#define DEFAULT_CNT_THRSHLD	0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ			BIT(31)
#define P_IRQ			0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK		BAM_IRQ
#define P_IRQ_MSK		P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ		BIT(4)
#define BAM_EMPTY_IRQ		BIT(3)
#define BAM_ERROR_IRQ		BIT(2)
#define BAM_HRESP_ERR_IRQ	BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR		BIT(4)
#define BAM_EMPTY_CLR		BIT(3)
#define BAM_ERROR_CLR		BIT(2)
#define BAM_HRESP_ERR_CLR	BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN		BIT(4)
#define BAM_EMPTY_EN		BIT(3)
#define BAM_ERROR_EN		BIT(2)
#define BAM_HRESP_ERR_EN	BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN		BIT(0)
#define P_TIMER_EN		BIT(1)
#define P_WAKE_EN		BIT(2)
#define P_OUT_OF_DESC_EN	BIT(3)
#define P_ERR_EN		BIT(4)
#define P_TRNSFR_END_EN		BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK		0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE	(SZ_32K - 8)
#define IS_BUSY(chan)	(CIRC_SPACE((chan)->tail, (chan)->head,\
			 MAX_DESCRIPTORS + 1) == 0)
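
/*
 * CIRC_SPACE() yields the number of free slots between the software tail
 * (producer) and head (consumer) in a ring of MAX_DESCRIPTORS + 1 entries,
 * so IS_BUSY() is true exactly when no further hardware descriptor can be
 * queued.  With head == tail the ring is empty and CIRC_SPACE() returns
 * MAX_DESCRIPTORS.
 */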

struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */
	/* list of descriptors currently processed */
	struct list_head desc_list;

	struct list_head node;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}

struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;
	u32 num_ees;

	/* execution environment ID, from DT */
	u32 ee;
	bool controlled_remotely;

	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};

/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
 * @reg:  register enum
 */
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
		enum bam_reg reg)
{
	const struct reg_offset_data r = bdev->layout[reg];

	return bdev->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * bdev->ee;
}
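
/*
 * Worked example: with the bam_v1_4_reg_info layout above, BAM_P_CTRL for
 * pipe 3 resolves to regs + 0x1000 + 0x1000 * 3 = regs + 0x4000, since only
 * pipe_mult is non-zero for that register.
 */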

/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	/* reset channel */
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time  */
	bchan->initialized = 0;
}

/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address.  We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	/* enable the per pipe interrupts: processed desc, error, transfer end */
	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}

/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
					&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}

static int bam_pm_runtime_get_sync(struct device *dev)
{
	if (pm_runtime_enabled(dev))
		return pm_runtime_get_sync(dev);

	return 0;
}

/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 *
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;
	int ret;

	ret = bam_pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (!list_empty(&bchan->desc_list)) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		goto err;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
		    bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* disable irq */
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

err:
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 *
 */
static int bam_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

/**
 * bam_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (!async_desc)
		goto err_out;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			if (flags & DMA_PREP_CMD)
				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);

			desc->addr = cpu_to_le32(sg_dma_address(sg) +
						 curr_offset);

			if (remainder > BAM_FIFO_SIZE) {
				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
				remainder -= BAM_FIFO_SIZE;
				curr_offset += BAM_FIFO_SIZE;
			} else {
				desc->size = cpu_to_le16(remainder);
				remainder = 0;
			}

			async_desc->length += le16_to_cpu(desc->size);
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);

err_out:
	kfree(async_desc);
	return NULL;
}
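
/*
 * Client drivers reach bam_prep_slave_sg() through the generic dmaengine
 * slave API.  A minimal sketch, assuming a channel has already been obtained
 * (e.g. via dma_request_chan()) and the scatterlist is DMA-mapped:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */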

/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: bam dma channel
 *
 * Dequeues and frees all transactions
 * No callbacks are done
 *
 */
static int bam_dma_terminate_all(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_async_desc *async_desc, *tmp;
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	list_for_each_entry_safe(async_desc, tmp,
				 &bchan->desc_list, desc_node) {
		list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
		list_del(&async_desc->desc_node);
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);

	return 0;
}

/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 *
 */
static int bam_pause(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = bam_pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}

/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 *
 */
static int bam_resume(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = bam_pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}

/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 *
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts, offset, avail;
	unsigned long flags;
	struct bam_async_desc *async_desc, *tmp;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq */
		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);

		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
				       P_SW_OFSTS_MASK;
		offset /= sizeof(struct bam_desc_hw);

		/* Number of descriptors the hardware has processed so far */
		avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);

		/*
		 * CIRC_CNT() counts modulo MAX_DESCRIPTORS + 1 while the FIFO
		 * indices wrap at MAX_DESCRIPTORS, so compensate by one when
		 * the hardware offset has wrapped past the head.
		 */
		if (offset < bchan->head)
			avail--;

		list_for_each_entry_safe(async_desc, tmp,
					 &bchan->desc_list, desc_node) {
			/* Not enough data to read */
			if (avail < async_desc->xfer_len)
				break;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			avail -= async_desc->xfer_len;

			/*
			 * if complete, process cookie. Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc) {
				vchan_cookie_complete(&async_desc->vd);
			} else {
				list_add(&async_desc->vd.node,
					 &bchan->vc.desc_issued);
			}
			list_del(&async_desc->desc_node);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}

/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;
	int ret;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	ret = bam_pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return IRQ_NONE;

	if (srcs & BAM_IRQ) {
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

		/*
		 * don't allow reorder of the various accesses to the BAM
		 * registers
		 */
		mb();

		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
	}

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return IRQ_HANDLED;
}

/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_async_desc *async_desc;
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd) {
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	} else {
		list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
			if (async_desc->vd.tx.cookie != cookie)
				continue;

			for (i = 0; i < async_desc->num_desc; i++)
				residue += le16_to_cpu(
						async_desc->curr_desc[i].size);
		}
	}

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

/**
 * bam_apply_new_config - apply the latest slave configuration to the channel
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (!bdev->controlled_remotely) {
		if (dir == DMA_DEV_TO_MEM)
			maxburst = bchan->slave.src_maxburst;
		else
			maxburst = bchan->slave.dst_maxburst;

		writel_relaxed(maxburst,
			       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
	}

	bchan->reconfigure = 0;
}

/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc = NULL;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));
	int ret;
	unsigned int avail;
	struct dmaengine_desc_callback cb;

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	ret = bam_pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	while (vd && !IS_BUSY(bchan)) {
		list_del(&vd->node);

		async_desc = container_of(vd, struct bam_async_desc, vd);

		/* on first use, initialize the channel hardware */
		if (!bchan->initialized)
			bam_chan_init_hw(bchan, async_desc->dir);

		/* apply new slave config changes, if necessary */
		if (bchan->reconfigure)
			bam_apply_new_config(bchan, async_desc->dir);

		desc = async_desc->curr_desc;
		avail = CIRC_SPACE(bchan->tail, bchan->head,
				   MAX_DESCRIPTORS + 1);

		if (async_desc->num_desc > avail)
			async_desc->xfer_len = avail;
		else
			async_desc->xfer_len = async_desc->num_desc;

		/* set any special flags on the last descriptor */
		if (async_desc->num_desc == async_desc->xfer_len)
			desc[async_desc->xfer_len - 1].flags |=
						cpu_to_le16(async_desc->flags);

		vd = vchan_next_desc(&bchan->vc);

		dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);

		/*
		 * An interrupt is generated at this desc, if
		 *  - FIFO is FULL.
		 *  - No more descriptors to add.
		 *  - If a callback completion was requested for this DESC,
		 *     In this case, BAM will deliver the completion callback
		 *     for this desc and continue processing the next desc.
		 */
		if (((avail <= async_desc->xfer_len) || !vd ||
		     dmaengine_desc_callback_valid(&cb)) &&
		    !(async_desc->flags & DESC_FLAG_EOT))
			desc[async_desc->xfer_len - 1].flags |=
				cpu_to_le16(DESC_FLAG_INT);

		if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
			u32 partial = MAX_DESCRIPTORS - bchan->tail;

			memcpy(&fifo[bchan->tail], desc,
			       partial * sizeof(struct bam_desc_hw));
			memcpy(fifo, &desc[partial],
			       (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
		} else {
			memcpy(&fifo[bchan->tail], desc,
			       async_desc->xfer_len *
			       sizeof(struct bam_desc_hw));
		}

		bchan->tail += async_desc->xfer_len;
		bchan->tail %= MAX_DESCRIPTORS;
		list_add_tail(&async_desc->desc_node, &bchan->desc_list);
	}

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Sets up next DMA operation and then processes all completed transactions
 */
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Starts any pending transactions on the channel if it is not already busy
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}
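
/*
 * The single-cell specifier decoded above corresponds to a device tree
 * consumer entry along these lines (illustrative only; the phandle label and
 * pipe numbers are board specific):
 *
 *	dmas = <&blsp_dma 4>, <&blsp_dma 5>;
 *	dma-names = "tx", "rx";
 */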

/**
 * bam_init
 * @bdev: bam device
 *
 * Initialization helper for global bam registers
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	if (!bdev->num_ees) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
	}

	/* check that configured EE is within range */
	if (bdev->ee >= bdev->num_ees)
		return -EINVAL;

	if (!bdev->num_channels) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
	}

	if (bdev->controlled_remotely)
		return 0;

	/* s/w reset bam */
	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable bam */
	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
	INIT_LIST_HEAD(&bchan->desc_list);
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);

static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	const struct of_device_id *match;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
	if (!match) {
		dev_err(&pdev->dev, "Unsupported BAM module\n");
		return -ENODEV;
	}

	bdev->layout = match->data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
						"qcom,controlled-remotely");

	if (bdev->controlled_remotely) {
		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
					   &bdev->num_channels);
		if (ret)
			dev_err(bdev->dev, "num-channels unspecified in dt\n");

		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
					   &bdev->num_ees);
		if (ret)
			dev_err(bdev->dev, "num-ees unspecified in dt\n");
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk)) {
		if (!bdev->controlled_remotely)
			return PTR_ERR(bdev->bamclk);

		bdev->bamclk = NULL;
	}

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_tasklet_kill;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_bam_channel_exit;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_bam_channel_exit;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_bam_channel_exit;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	if (bdev->controlled_remotely) {
		pm_runtime_disable(&pdev->dev);
		return 0;
	}

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
	for (i = 0; i < bdev->num_channels; i++)
		tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
	tasklet_kill(&bdev->task);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);

	return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	pm_runtime_force_suspend(&pdev->dev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0,  BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		if (!bdev->channels[i].fifo_virt)
			continue;

		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
			    bdev->channels[i].fifo_virt,
			    bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	clk_disable(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(bdev->bamclk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int __maybe_unused bam_dma_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	if (!bdev->controlled_remotely)
		pm_runtime_force_suspend(dev);

	clk_unprepare(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare(bdev->bamclk);
	if (ret)
		return ret;

	if (!bdev->controlled_remotely)
		pm_runtime_force_resume(dev);

	return 0;
}

static const struct dev_pm_ops bam_dma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
				NULL)
};

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.pm = &bam_dma_pm_ops,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");