/*
 * OMAP2+ DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/omap-dma.h>

#include "soc.h"
#include "omap_hwmod.h"
#include "omap_device.h"

#define OMAP2_DMA_STRIDE	0x60

static u32 errata;
static u8 dma_stride;

static struct omap_dma_dev_attr *d;

static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

static u16 reg_map[] = {
	[REVISION]		= 0x00,
	[GCR]			= 0x78,
	[IRQSTATUS_L0]		= 0x08,
	[IRQSTATUS_L1]		= 0x0c,
	[IRQSTATUS_L2]		= 0x10,
	[IRQSTATUS_L3]		= 0x14,
	[IRQENABLE_L0]		= 0x18,
	[IRQENABLE_L1]		= 0x1c,
	[IRQENABLE_L2]		= 0x20,
	[IRQENABLE_L3]		= 0x24,
	[SYSSTATUS]		= 0x28,
	[OCP_SYSCONFIG]		= 0x2c,
	[CAPS_0]		= 0x64,
	[CAPS_2]		= 0x6c,
	[CAPS_3]		= 0x70,
	[CAPS_4]		= 0x74,

	/* Common register offsets */
	[CCR]			= 0x80,
	[CLNK_CTRL]		= 0x84,
	[CICR]			= 0x88,
	[CSR]			= 0x8c,
	[CSDP]			= 0x90,
	[CEN]			= 0x94,
	[CFN]			= 0x98,
	[CSEI]			= 0xa4,
	[CSFI]			= 0xa8,
	[CDEI]			= 0xac,
	[CDFI]			= 0xb0,
	[CSAC]			= 0xb4,
	[CDAC]			= 0xb8,

	/* Channel specific register offsets */
	[CSSA]			= 0x9c,
	[CDSA]			= 0xa0,
	[CCEN]			= 0xbc,
	[CCFN]			= 0xc0,
	[COLOR]			= 0xc4,

	/* OMAP4 specific registers */
	[CDP]			= 0xd0,
	[CNDP]			= 0xd4,
	[CCDN]			= 0xd8,
};

static void __iomem *dma_base;
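
/*
 * Register access helpers.  Registers at or above dma_common_ch_start are
 * replicated per logical channel every dma_stride (0x60) bytes, so the
 * per-channel offset is reg_map[reg] + lch * dma_stride.  Registers below
 * that point are global to the sDMA module and take no stride.
 */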
static inline void dma_write(u32 val, int reg, int lch)
{
	u8  stride;
	u32 offset;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	__raw_writel(val, dma_base + offset);
}

static inline u32 dma_read(int reg, int lch)
{
	u8 stride;
	u32 offset, val;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	val = __raw_readl(dma_base + offset);
	return val;
}

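/*
 * IRQENABLE_L0 holds one enable bit per logical channel and sits below
 * dma_common_ch_start, so no per-channel stride is applied; the lch
 * argument only selects which bit to clear.
 */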
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;

	val = dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	dma_write(val, IRQENABLE_L0, lch);
}

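/*
 * Zero every common per-channel register (dma_common_ch_start to
 * dma_common_ch_end) of the given logical channel.
 */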
static void omap2_clear_dma(int lch)
{
	int i = dma_common_ch_start;

	for (; i <= dma_common_ch_end; i += 1)
		dma_write(0, i, lch);
}

static void omap2_show_dma_caps(void)
{
	u8 revision = dma_read(REVISION, 0) & 0xff;

	pr_info("OMAP DMA hardware revision %d.%d\n",
		revision >> 4, revision & 0xf);
}

static u32 configure_dma_errata(void)
{

	/*
	 * Errata applicable for OMAP2430ES1.0 and all OMAP2420
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter-frame DMA buffering issue: DMA will wrongly buffer elements
	 * if packing and bursting are enabled. This might result in data
	 * getting stalled in the FIFO at the end of the block.
	 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set
	 * to guarantee no data will stay in the DMA FIFO in case inter-frame
	 * buffering occurs.
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel.
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled.
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before end of transfer.
	 * d. Channel i is reenabled.
	 * e. Steps a to d are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {

		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
	 * after a transaction error.
	 * Workaround: SW should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish
	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
	 * though FIFO draining is still in progress.
	 * Workaround: Put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88 : Special programming model needed to disable DMA
	 * before end of block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01").
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller has finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
	 * after secure sram context save and restore.
	 * Workaround: manually clear those IRQs to avoid spurious
	 * interrupts. This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}
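
/*
 * The bitmask built here is handed to the legacy plat-omap DMA code via
 * omap_system_dma_plat_info.errata (see omap2_system_dma_init_dev() below).
 * A minimal sketch of how a flag is typically consumed, assuming the
 * IS_DMA_ERRATA() helper from <linux/omap-dma.h> (illustration only, not
 * part of this driver):
 *
 *	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && position == 0)
 *		position = dma_read(CSAC, lch);
 *
 * i.e. a zero CSAC/CDAC reading is treated as potentially bogus and the
 * register is read once more.
 */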

/* One-time initialization, run for each hwmod of class "dma" */
static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
{
	struct platform_device			*pdev;
	struct omap_system_dma_plat_info	*p;
	struct resource				*mem;
	char					*name = "omap_dma_system";

	dma_stride		= OMAP2_DMA_STRIDE;
	dma_common_ch_start	= CSDP;

	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
	if (!p) {
		pr_err("%s: Unable to allocate pdata for %s:%s\n",
			__func__, name, oh->name);
		return -ENOMEM;
	}

	p->dma_attr		= (struct omap_dma_dev_attr *)oh->dev_attr;
	p->disable_irq_lch	= omap2_disable_irq_lch;
	p->show_dma_caps	= omap2_show_dma_caps;
	p->clear_dma		= omap2_clear_dma;
	p->dma_write		= dma_write;
	p->dma_read		= dma_read;

	p->clear_lch_regs	= NULL;

	p->errata		= configure_dma_errata();

	pdev = omap_device_build(name, 0, oh, p, sizeof(*p));
	kfree(p);
	if (IS_ERR(pdev)) {
		pr_err("%s: Can't build omap_device for %s:%s.\n",
			__func__, name, oh->name);
		return PTR_ERR(pdev);
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
		return -EINVAL;
	}
	dma_base = ioremap(mem->start, resource_size(mem));
	if (!dma_base) {
		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
		return -ENOMEM;
	}

	d = oh->dev_attr;
	d->chan = kzalloc(sizeof(struct omap_dma_lch) *
					(d->lch_count), GFP_KERNEL);

	if (!d->chan) {
		dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
		iounmap(dma_base);
		return -ENOMEM;
	}

	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		d->dev_caps |= HS_CHANNELS_RESERVED;

	/* Check the capabilities register for descriptor loading feature */
	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
		dma_common_ch_end = CCDN;
	else
		dma_common_ch_end = CCFN;

	return 0;
}

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int __init omap2_system_dma_init(void)
{
	struct platform_device *pdev;
	int res;

	res = omap_hwmod_for_each_by_class("dma",
			omap2_system_dma_init_dev, NULL);
	if (res)
		return res;

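	/*
	 * With a populated device tree the dmaengine device is created from
	 * the DMA controller node, so the manual "omap-dma-engine"
	 * registration below is only needed for legacy (non-DT) boot.
	 */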
	if (of_have_populated_dt())
		return res;

	pdev = platform_device_register_full(&omap_dma_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return res;
}
omap_arch_initcall(omap2_system_dma_init);