/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "ste_dma40_ll.h"
13 14 15 16 17 18 19 20 21 22 23 24
u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		return STEDMA40_ESIZE_8_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		return STEDMA40_ESIZE_16_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return STEDMA40_ESIZE_64_BIT;
	else
		return STEDMA40_ESIZE_32_BIT;
}

25 26 27 28 29 30 31 32
/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
		 u32 *lcsp1, u32 *lcsp3)
{
	u32 l3 = 0; /* dst */
	u32 l1 = 0; /* src */

	/* src is mem? -> increase address pos */
33 34
	if (cfg->dir ==  DMA_MEM_TO_DEV ||
	    cfg->dir ==  DMA_MEM_TO_MEM)
35
		l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);
36 37

	/* dst is mem? -> increase address pos */
38 39
	if (cfg->dir ==  DMA_DEV_TO_MEM ||
	    cfg->dir ==  DMA_MEM_TO_MEM)
40
		l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);
41 42

	/* src is hw? -> master port 1 */
43 44
	if (cfg->dir ==  DMA_DEV_TO_MEM ||
	    cfg->dir ==  DMA_DEV_TO_DEV)
45
		l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);
46 47

	/* dst is hw? -> master port 1 */
48 49
	if (cfg->dir ==  DMA_MEM_TO_DEV ||
	    cfg->dir ==  DMA_DEV_TO_DEV)
50
		l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);
51

52
	l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
53
	l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
54 55
	l3 |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_MEM_LCSP3_DCFG_ESIZE_POS;
56

57
	l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
58
	l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
59 60
	l1 |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_MEM_LCSP1_SCFG_ESIZE_POS;
61 62 63 64 65 66

	*lcsp1 = l1;
	*lcsp3 = l3;

}

67
void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
68 69 70 71
{
	u32 src = 0;
	u32 dst = 0;

72 73
	if ((cfg->dir == DMA_DEV_TO_MEM) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
74
		/* Set master port to 1 */
75
		src |= BIT(D40_SREG_CFG_MST_POS);
76 77 78
		src |= D40_TYPE_TO_EVENT(cfg->dev_type);

		if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
79
			src |= BIT(D40_SREG_CFG_PHY_TM_POS);
80 81 82
		else
			src |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
83 84
	if ((cfg->dir == DMA_MEM_TO_DEV) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
85
		/* Set master port to 1 */
86
		dst |= BIT(D40_SREG_CFG_MST_POS);
87 88 89
		dst |= D40_TYPE_TO_EVENT(cfg->dev_type);

		if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
90
			dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
91 92 93 94
		else
			dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
	/* Interrupt on end of transfer for destination */
95
	dst |= BIT(D40_SREG_CFG_TIM_POS);
96 97

	/* Generate interrupt on error */
98 99
	src |= BIT(D40_SREG_CFG_EIM_POS);
	dst |= BIT(D40_SREG_CFG_EIM_POS);
100 101 102

	/* PSIZE */
	if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
103
		src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
104 105 106
		src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
	}
	if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
107
		dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
108 109 110 111
		dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
	}

	/* Element size */
112 113 114 115
	src |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;
	dst |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;
116 117 118

	/* Set the priority bit to high for the physical channel */
	if (cfg->high_priority) {
119 120
		src |= BIT(D40_SREG_CFG_PRI_POS);
		dst |= BIT(D40_SREG_CFG_PRI_POS);
121 122
	}

123
	if (cfg->src_info.big_endian)
124
		src |= BIT(D40_SREG_CFG_LBE_POS);
125
	if (cfg->dst_info.big_endian)
126
		dst |= BIT(D40_SREG_CFG_LBE_POS);
127 128 129 130 131

	*src_cfg = src;
	*dst_cfg = dst;
}

132 133 134 135 136
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
			    dma_addr_t data,
			    u32 data_size,
			    dma_addr_t next_lli,
			    u32 reg_cfg,
137 138
			    struct stedma40_half_channel_info *info,
			    unsigned int flags)
139
{
140 141
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
142 143
	unsigned int data_width = info->data_width;
	int psize = info->psize;
144 145 146 147 148 149 150 151
	int num_elems;

	if (psize == STEDMA40_PSIZE_PHY_1)
		num_elems = 1;
	else
		num_elems = 2 << psize;

	/* Must be aligned */
152
	if (!IS_ALIGNED(data, data_width))
153 154 155
		return -EINVAL;

	/* Transfer size can't be smaller than (num_elms * elem_size) */
156
	if (data_size < num_elems * data_width)
157 158 159
		return -EINVAL;

	/* The number of elements. IE now many chunks */
160
	lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
161 162 163 164 165

	/*
	 * Distance to next element sized entry.
	 * Usually the size of the element unless you want gaps.
	 */
166
	if (addr_inc)
167
		lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;
168 169 170 171 172 173 174

	/* Where the data is */
	lli->reg_ptr = data;
	lli->reg_cfg = reg_cfg;

	/* If this scatter list entry is the last one, no next link */
	if (next_lli == 0)
175
		lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
176 177 178 179 180
	else
		lli->reg_lnk = next_lli;

	/* Set/clear interrupt generation on this link item.*/
	if (term_int)
181
		lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
182
	else
183
		lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);
184

185 186 187 188
	/*
	 * Post link - D40_SREG_LNK_PHY_PRE_POS = 0
	 * Relink happens after transfer completion.
	 */
189 190 191 192

	return 0;
}

193 194 195 196
static int d40_seg_size(int size, int data_width1, int data_width2)
{
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
197
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
198 199

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
200
		seg_max -= max_w;
201 202 203 204 205

	if (size <= seg_max)
		return size;

	if (size <= 2 * seg_max)
206
		return ALIGN(size / 2, max_w);
207 208 209 210

	return seg_max;
}

211 212
static struct d40_phy_lli *
d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
R
Rabin Vincent 已提交
213
		   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
214 215 216
		   struct stedma40_half_channel_info *info,
		   struct stedma40_half_channel_info *otherinfo,
		   unsigned long flags)
217
{
R
Rabin Vincent 已提交
218
	bool lastlink = flags & LLI_LAST_LINK;
219 220
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
R
Rabin Vincent 已提交
221
	bool cyclic = flags & LLI_CYCLIC;
222 223 224 225 226
	int err;
	dma_addr_t next = lli_phys;
	int size_rest = size;
	int size_seg = 0;

227 228 229 230 231 232 233
	/*
	 * This piece may be split up based on d40_seg_size(); we only want the
	 * term int on the last part.
	 */
	if (term_int)
		flags &= ~LLI_TERM_INT;

234
	do {
235 236
		size_seg = d40_seg_size(size_rest, info->data_width,
					otherinfo->data_width);
237 238
		size_rest -= size_seg;

R
Rabin Vincent 已提交
239
		if (size_rest == 0 && term_int)
240
			flags |= LLI_TERM_INT;
R
Rabin Vincent 已提交
241 242 243 244

		if (size_rest == 0 && lastlink)
			next = cyclic ? first_phys : 0;
		else
245 246 247
			next = ALIGN(next + sizeof(struct d40_phy_lli),
				     D40_LLI_ALIGN);

248 249
		err = d40_phy_fill_lli(lli, addr, size_seg, next,
				       reg_cfg, info, flags);
250 251 252 253 254

		if (err)
			goto err;

		lli++;
255
		if (addr_inc)
256 257 258 259 260
			addr += size_seg;
	} while (size_rest);

	return lli;

261
err:
262 263 264
	return NULL;
}

265 266 267
int d40_phy_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t target,
268
		      struct d40_phy_lli *lli_sg,
269 270
		      dma_addr_t lli_phys,
		      u32 reg_cfg,
271
		      struct stedma40_half_channel_info *info,
R
Rabin Vincent 已提交
272 273
		      struct stedma40_half_channel_info *otherinfo,
		      unsigned long flags)
274 275 276 277
{
	int total_size = 0;
	int i;
	struct scatterlist *current_sg = sg;
278 279
	struct d40_phy_lli *lli = lli_sg;
	dma_addr_t l_phys = lli_phys;
280 281 282

	if (!target)
		flags |= LLI_ADDR_INC;
283 284

	for_each_sg(sg, current_sg, sg_len, i) {
285 286 287
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t dst = target ?: sg_addr;
288 289 290

		total_size += sg_dma_len(current_sg);

291
		if (i == sg_len - 1)
R
Rabin Vincent 已提交
292
			flags |= LLI_TERM_INT | LLI_LAST_LINK;
293

294 295 296
		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);

R
Rabin Vincent 已提交
297
		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
298 299
					 reg_cfg, info, otherinfo, flags);

300 301
		if (lli == NULL)
			return -EINVAL;
302 303 304 305 306 307 308 309
	}

	return total_size;
}


/* DMA logical lli operations */

310 311
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
			     struct d40_log_lli *lli_src,
R
Rabin Vincent 已提交
312
			     int next, unsigned int flags)
313
{
R
Rabin Vincent 已提交
314
	bool interrupt = flags & LLI_TERM_INT;
315 316 317 318 319 320
	u32 slos = 0;
	u32 dlos = 0;

	if (next != -EINVAL) {
		slos = next * 2;
		dlos = next * 2 + 1;
R
Rabin Vincent 已提交
321 322 323
	}

	if (interrupt) {
324 325 326 327 328 329 330 331 332 333 334 335 336 337
		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
	}

	lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(slos << D40_MEM_LCSP1_SLOS_POS);

	lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(dlos << D40_MEM_LCSP1_SLOS_POS);
}

void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
			   struct d40_log_lli *lli_dst,
			   struct d40_log_lli *lli_src,
R
Rabin Vincent 已提交
338
			   int next, unsigned int flags)
339
{
R
Rabin Vincent 已提交
340
	d40_log_lli_link(lli_dst, lli_src, next, flags);
341

342 343 344 345
	writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
	writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
	writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
	writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
346 347 348 349 350
}

void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
			   struct d40_log_lli *lli_dst,
			   struct d40_log_lli *lli_src,
R
Rabin Vincent 已提交
351
			   int next, unsigned int flags)
352
{
R
Rabin Vincent 已提交
353
	d40_log_lli_link(lli_dst, lli_src, next, flags);
354

355 356 357 358
	writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
	writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
	writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
	writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
359 360
}

361 362 363 364
static void d40_log_fill_lli(struct d40_log_lli *lli,
			     dma_addr_t data, u32 data_size,
			     u32 reg_cfg,
			     u32 data_width,
365
			     unsigned int flags)
366
{
367 368
	bool addr_inc = flags & LLI_ADDR_INC;

369 370 371
	lli->lcsp13 = reg_cfg;

	/* The number of elements to transfer */
372
	lli->lcsp02 = ((data_size / data_width) <<
373
		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
374

375
	BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);
376

377 378 379 380 381 382 383 384 385 386
	/* 16 LSBs address of the current element */
	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
	/* 16 MSBs address of the current element */
	lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;

	if (addr_inc)
		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;

}

387
static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
388 389 390 391 392
				       dma_addr_t addr,
				       int size,
				       u32 lcsp13, /* src or dst*/
				       u32 data_width1,
				       u32 data_width2,
393
				       unsigned int flags)
394
{
395
	bool addr_inc = flags & LLI_ADDR_INC;
396 397 398 399 400 401 402 403 404 405 406 407
	struct d40_log_lli *lli = lli_sg;
	int size_rest = size;
	int size_seg = 0;

	do {
		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
		size_rest -= size_seg;

		d40_log_fill_lli(lli,
				 addr,
				 size_seg,
				 lcsp13, data_width1,
408
				 flags);
409 410 411 412 413 414 415 416
		if (addr_inc)
			addr += size_seg;
		lli++;
	} while (size_rest);

	return lli;
}

417
int d40_log_sg_to_lli(struct scatterlist *sg,
418
		      int sg_len,
419
		      dma_addr_t dev_addr,
420 421
		      struct d40_log_lli *lli_sg,
		      u32 lcsp13, /* src or dst*/
422
		      u32 data_width1, u32 data_width2)
423 424 425 426
{
	int total_size = 0;
	struct scatterlist *current_sg = sg;
	int i;
427
	struct d40_log_lli *lli = lli_sg;
428 429 430 431
	unsigned long flags = 0;

	if (!dev_addr)
		flags |= LLI_ADDR_INC;
432 433

	for_each_sg(sg, current_sg, sg_len, i) {
434 435 436 437
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t addr = dev_addr ?: sg_addr;

438
		total_size += sg_dma_len(current_sg);
439 440

		lli = d40_log_buf_to_lli(lli, addr, len,
441
					 lcsp13,
442 443
					 data_width1,
					 data_width2,
444
					 flags);
445
	}
446

447 448
	return total_size;
}