Commit dcded10f authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (30 commits)
  DMAENGINE: at_hdmac: locking fixlet
  DMAENGINE: pch_dma: kill another usage of __raw_{read|write}l
  dma: dmatest: fix potential sign bug
  ioat2: catch and recover from broken vtd configurations v6
  DMAENGINE: add runtime slave control to COH 901 318 v3
  DMAENGINE: add runtime slave config to DMA40 v3
  DMAENGINE: generic slave channel control v3
  dmaengine: Driver for Topcliff PCH DMA controller
  intel_mid: Add Mrst & Mfld DMA Drivers
  drivers/dma: Eliminate a NULL pointer dereference
  dma/timb_dma: compile warning on 32 bit
  DMAENGINE: ste_dma40: support older silicon
  DMAENGINE: ste_dma40: support disabling physical channels
  DMAENGINE: ste_dma40: no disabled phy channels on ux500
  DMAENGINE: ste_dma40: fix suspend bug
  DMAENGINE: ste_dma40: add DB8500 memcpy channels
  DMAENGINE: ste_dma40: no flow control on memcpy
  DMAENGINE: ste_dma40: arch updates for LCLA and LCPA
  DMAENGINE: ste_dma40: allocate LCLA dynamically
  DMAENGINE: ste_dma40: no premature stop
  ...

Fix up trivial conflicts in arch/arm/mach-ux500/devices-db8500.c
@@ -113,26 +113,21 @@ struct platform_device u8500_i2c4_device = {
 static struct resource dma40_resources[] = {
 	[0] = {
 		.start = U8500_DMA_BASE,
 		.end   = U8500_DMA_BASE + SZ_4K - 1,
 		.flags = IORESOURCE_MEM,
 		.name  = "base",
 	},
 	[1] = {
 		.start = U8500_DMA_LCPA_BASE,
-		.end   = U8500_DMA_LCPA_BASE + SZ_4K - 1,
+		.end   = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
 		.flags = IORESOURCE_MEM,
 		.name  = "lcpa",
 	},
 	[2] = {
-		.start = U8500_DMA_LCLA_BASE,
-		.end   = U8500_DMA_LCLA_BASE + 16 * 1024 - 1,
-		.flags = IORESOURCE_MEM,
-		.name  = "lcla",
-	},
-	[3] = {
 		.start = IRQ_DB8500_DMA,
 		.end   = IRQ_DB8500_DMA,
-		.flags = IORESOURCE_IRQ}
+		.flags = IORESOURCE_IRQ,
+	}
 };
 
 /* Default configuration for physical memcpy */
@@ -145,11 +140,12 @@ struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
 	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_PHY_1,
+	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
+	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };
 
 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
@@ -162,11 +158,12 @@ struct stedma40_chan_cfg dma40_memcpy_conf_log = {
 	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_LOG_1,
+	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
+	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };
 
 /*
@@ -180,10 +177,12 @@ static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV];
 /* Reserved event lines for memcpy only */
 static int dma40_memcpy_event[] = {
+	STEDMA40_MEMCPY_TX_0,
 	STEDMA40_MEMCPY_TX_1,
 	STEDMA40_MEMCPY_TX_2,
 	STEDMA40_MEMCPY_TX_3,
 	STEDMA40_MEMCPY_TX_4,
+	STEDMA40_MEMCPY_TX_5,
 };
 
 static struct stedma40_platform_data dma40_plat_data = {
@@ -195,6 +194,7 @@ static struct stedma40_platform_data dma40_plat_data = {
 	.memcpy_conf_phy = &dma40_memcpy_conf_phy,
 	.memcpy_conf_log = &dma40_memcpy_conf_log,
 	.llis_per_log = 8,
+	.disabled_channels = {-1},
 };
 
 struct platform_device u8500_dma40_device = {
@@ -213,4 +213,6 @@ void dma40_u8500ed_fixup(void)
 	dma40_plat_data.memcpy_len = 0;
 	dma40_resources[0].start = U8500_DMA_BASE_ED;
 	dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
+	dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
+	dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
 }
@@ -15,9 +15,9 @@
 #define U8500_ESRAM_BANK2	(U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
 #define U8500_ESRAM_BANK3	(U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
 #define U8500_ESRAM_BANK4	(U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
-/* Use bank 4 for DMA LCLA and LCPA */
-#define U8500_DMA_LCLA_BASE	U8500_ESRAM_BANK4
-#define U8500_DMA_LCPA_BASE	(U8500_ESRAM_BANK4 + 0x4000)
+/* Use bank 4 for DMA LCPA */
+#define U8500_DMA_LCPA_BASE	U8500_ESRAM_BANK4
+#define U8500_DMA_LCPA_BASE_ED	(U8500_ESRAM_BANK4 + 0x4000)
 
 #define U8500_PER3_BASE		0x80000000
 #define U8500_STM_BASE		0x80100000
...
@@ -136,7 +136,7 @@ enum dma_dest_dev_type {
 	STEDMA40_DEV_CAC1_TX = 48,
 	STEDMA40_DEV_CAC1_TX_HAC1_TX = 49,
 	STEDMA40_DEV_HAC1_TX = 50,
-	STEDMA40_MEMXCPY_TX_0 = 51,
+	STEDMA40_MEMCPY_TX_0 = 51,
 	STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52,
 	STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53,
 	STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54,
...
@@ -148,7 +148,8 @@ struct stedma40_chan_cfg {
 * @memcpy_conf_phy: default configuration of physical channel memcpy
 * @memcpy_conf_log: default configuration of logical channel memcpy
 * @llis_per_log: number of max linked list items per logical channel
- *
+ * @disabled_channels: A vector, ending with -1, that marks physical channels
+ * that are for different reasons not available for the driver.
 */
 struct stedma40_platform_data {
 	u32 dev_len;
@@ -159,6 +160,7 @@ struct stedma40_platform_data {
 	struct stedma40_chan_cfg *memcpy_conf_phy;
 	struct stedma40_chan_cfg *memcpy_conf_log;
 	unsigned int llis_per_log;
+	int disabled_channels[8];
 };
 
 /**
...
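For illustration, a board whose DMA40 physical channels are partly reserved (for example by secure-side firmware) could advertise that through the new field. A minimal sketch, with the reserved channel numbers purely hypothetical:

	/* Hypothetical board data: physical channels 0 and 1 are reserved
	 * elsewhere, so the driver must not touch them; -1 terminates the
	 * vector (an empty list is spelled {-1}, as on DB8500 above). */
	static struct stedma40_platform_data board_dma40_pdata = {
		.memcpy_conf_phy = &dma40_memcpy_conf_phy,
		.memcpy_conf_log = &dma40_memcpy_conf_log,
		.llis_per_log = 8,
		.disabled_channels = {0, 1, -1},
	};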
@@ -33,6 +33,19 @@ if DMADEVICES
 
 comment "DMA Devices"
 
+config INTEL_MID_DMAC
+	tristate "Intel MID DMA support for Peripheral DMA controllers"
+	depends on PCI && X86
+	select DMA_ENGINE
+	default n
+	help
+	  Enable support for the Intel(R) MID DMA engine present
+	  in Intel MID chipsets.
+
+	  Say Y here if you have such a chipset.
+
+	  If unsure, say N.
+
 config ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	bool
 
@@ -175,6 +188,13 @@ config PL330_DMA
 	  You need to provide platform specific settings via
 	  platform_data for a dma-pl330 device.
 
+config PCH_DMA
+	tristate "Topcliff PCH DMA support"
+	depends on PCI && X86
+	select DMA_ENGINE
+	help
+	  Enable support for the Topcliff PCH DMA engine.
+
 config DMA_ENGINE
 	bool
...
@@ -7,6 +7,7 @@ endif
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
@@ -23,3 +24,4 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
@@ -790,12 +790,12 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	list_splice_init(&atchan->queue, &list);
 	list_splice_init(&atchan->active_list, &list);
 
-	spin_unlock_bh(&atchan->lock);
-
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
 
+	spin_unlock_bh(&atchan->lock);
+
 	return 0;
 }
...
@@ -72,6 +72,9 @@ struct coh901318_chan {
 	unsigned long nbr_active_done;
 	unsigned long busy;
 
+	u32 runtime_addr;
+	u32 runtime_ctrl;
+
 	struct coh901318_base *base;
 };
 
@@ -190,6 +193,9 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
 static inline dma_addr_t
 cohc_dev_addr(struct coh901318_chan *cohc)
 {
+	/* Runtime supplied address will take precedence */
+	if (cohc->runtime_addr)
+		return cohc->runtime_addr;
 	return cohc->base->platform->chan_conf[cohc->id].dev_addr;
 }
 
@@ -1055,6 +1061,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	params = cohc_chan_param(cohc);
 	config = params->config;
 
+	/*
+	 * Add runtime-specific control on top, make
+	 * sure the bits you set per peripheral channel are
+	 * cleared in the default config from the platform.
+	 */
+	ctrl_chained |= cohc->runtime_ctrl;
+	ctrl_last |= cohc->runtime_ctrl;
+	ctrl |= cohc->runtime_ctrl;
+
 	if (direction == DMA_TO_DEVICE) {
 		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1113,6 +1127,12 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (ret)
 		goto err_lli_fill;
 
+	/*
+	 * Set the default ctrl for the channel to the one from the lli,
+	 * things may have changed due to odd buffer alignment etc.
+	 */
+	coh901318_set_ctrl(cohc, lli->control);
+
 	COH_DBG(coh901318_list_print(cohc, lli));
 
 	/* Pick a descriptor to handle this transfer */
@@ -1175,6 +1195,146 @@ coh901318_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
 
+/*
+ * Here we wrap in the runtime dma control interface
+ */
+struct burst_table {
+	int burst_8bit;
+	int burst_16bit;
+	int burst_32bit;
+	u32 reg;
+};
+
+static const struct burst_table burst_sizes[] = {
+	{
+		.burst_8bit = 64,
+		.burst_16bit = 32,
+		.burst_32bit = 16,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
+	},
+	{
+		.burst_8bit = 48,
+		.burst_16bit = 24,
+		.burst_32bit = 12,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
+	},
+	{
+		.burst_8bit = 32,
+		.burst_16bit = 16,
+		.burst_32bit = 8,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
+	},
+	{
+		.burst_8bit = 16,
+		.burst_16bit = 8,
+		.burst_32bit = 4,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
+	},
+	{
+		.burst_8bit = 8,
+		.burst_16bit = 4,
+		.burst_32bit = 2,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
+	},
+	{
+		.burst_8bit = 4,
+		.burst_16bit = 2,
+		.burst_32bit = 1,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
+	},
+	{
+		.burst_8bit = 2,
+		.burst_16bit = 1,
+		.burst_32bit = 0,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
+	},
+	{
+		.burst_8bit = 1,
+		.burst_16bit = 0,
+		.burst_32bit = 0,
+		.reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
+	},
+};
+
+static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct coh901318_chan *cohc = to_coh901318_chan(chan);
+	dma_addr_t addr;
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+	u32 runtime_ctrl = 0;
+	int i = 0;
+
+	/* We only support mem to per or per to mem transfers */
+	if (config->direction == DMA_FROM_DEVICE) {
+		addr = config->src_addr;
+		addr_width = config->src_addr_width;
+		maxburst = config->src_maxburst;
+	} else if (config->direction == DMA_TO_DEVICE) {
+		addr = config->dst_addr;
+		addr_width = config->dst_addr_width;
+		maxburst = config->dst_maxburst;
+	} else {
+		dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
+		return;
+	}
+
+	dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
+		addr_width);
+	switch (addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		runtime_ctrl |=
+			COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
+			COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
+
+		while (i < ARRAY_SIZE(burst_sizes)) {
+			if (burst_sizes[i].burst_8bit <= maxburst)
+				break;
+			i++;
+		}
+
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		runtime_ctrl |=
+			COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
+			COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
+
+		while (i < ARRAY_SIZE(burst_sizes)) {
+			if (burst_sizes[i].burst_16bit <= maxburst)
+				break;
+			i++;
+		}
+
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		/* Direction doesn't matter here, it's 32/32 bits */
+		runtime_ctrl |=
+			COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+			COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
+
+		while (i < ARRAY_SIZE(burst_sizes)) {
+			if (burst_sizes[i].burst_32bit <= maxburst)
+				break;
+			i++;
+		}
+
+		break;
+	default:
+		dev_err(COHC_2_DEV(cohc),
+			"bad runtimeconfig: alien address width\n");
+		return;
+	}
+
+	runtime_ctrl |= burst_sizes[i].reg;
+	dev_dbg(COHC_2_DEV(cohc),
+		"selected burst size %d bytes for address width %d bytes, maxburst %d\n",
+		burst_sizes[i].burst_8bit, addr_width, maxburst);
+
+	cohc->runtime_addr = addr;
+	cohc->runtime_ctrl = runtime_ctrl;
+}
+
 static int
 coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		  unsigned long arg)
@@ -1184,6 +1344,14 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct coh901318_desc *cohd;
 	void __iomem *virtbase = cohc->base->virtbase;
 
+	if (cmd == DMA_SLAVE_CONFIG) {
+		struct dma_slave_config *config =
+			(struct dma_slave_config *) arg;
+
+		coh901318_dma_set_runtimeconfig(chan, config);
+		return 0;
+	}
+
 	if (cmd == DMA_PAUSE) {
 		coh901318_pause(chan);
 		return 0;
@@ -1240,6 +1408,7 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return 0;
 }
 
+
 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 			 struct coh901318_base *base)
 {
...
@@ -540,7 +540,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
 	struct dmatest_chan *dtc;
 	struct dma_device *dma_dev = chan->device;
 	unsigned int thread_count = 0;
-	unsigned int cnt;
+	int cnt;
 
 	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
...
This diff is collapsed.
/*
* intel_mid_dma_regs.h - Intel MID DMA Drivers
*
* Copyright (C) 2008-10 Intel Corp
* Author: Vinod Koul <vinod.koul@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*
*/
#ifndef __INTEL_MID_DMAC_REGS_H__
#define __INTEL_MID_DMAC_REGS_H__
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/pci_ids.h>
#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
#define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100
#define UNMASK_INTR_REG(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
#define ENABLE_CHANNEL(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define DESCS_PER_CHANNEL 16
/*DMA Registers*/
/*registers associated with channel programming*/
#define DMA_REG_SIZE 0x400
#define DMA_CH_SIZE 0x58
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR 0x00 /* Source Address Register*/
#define DAR 0x08 /* Destination Address Register*/
#define CTL_LOW 0x18 /* Control Register*/
#define CTL_HIGH 0x1C /* Control Register*/
#define CFG_LOW 0x40 /* Configuration Register Low*/
#define CFG_HIGH 0x44 /* Configuration Register high*/
#define STATUS_TFR 0x2E8
#define STATUS_BLOCK 0x2F0
#define STATUS_ERR 0x308
#define RAW_TFR 0x2C0
#define RAW_BLOCK 0x2C8
#define RAW_ERR 0x2E0
#define MASK_TFR 0x310
#define MASK_BLOCK 0x318
#define MASK_SRC_TRAN 0x320
#define MASK_DST_TRAN 0x328
#define MASK_ERR 0x330
#define CLEAR_TFR 0x338
#define CLEAR_BLOCK 0x340
#define CLEAR_SRC_TRAN 0x348
#define CLEAR_DST_TRAN 0x350
#define CLEAR_ERR 0x358
#define INTR_STATUS 0x360
#define DMA_CFG 0x398
#define DMA_CHAN_EN 0x3A0
/*DMA channel control registers*/
union intel_mid_dma_ctl_lo {
struct {
u32 int_en:1; /*enable or disable interrupts*/
/*should be 0*/
u32 dst_tr_width:3; /*destination transfer width*/
/*usually 32 bits = 010*/
u32 src_tr_width:3; /*source transfer width*/
/*usually 32 bits = 010*/
u32 dinc:2; /*destination address inc/dec*/
/*For mem: INC=00, Peripheral NoINC=11*/
u32 sinc:2; /*source address inc or dec, as above*/
u32 dst_msize:3; /*destination burst transaction length*/
/*always = 16 ie 011*/
u32 src_msize:3; /*source burst transaction length*/
/*always = 16 ie 011*/
u32 reser1:3;
u32 tt_fc:3; /*transfer type and flow controller*/
/*M-M = 000
P-M = 010
M-P = 001*/
u32 dms:2; /*destination master select = 0*/
u32 sms:2; /*source master select = 0*/
u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
u32 llp_src_en:1; /*enable/disable source LLP = 0*/
u32 reser2:3;
} ctlx;
u32 ctl_lo;
};
union intel_mid_dma_ctl_hi {
struct {
u32 block_ts:12; /*block transfer size*/
/*configured by DMAC*/
u32 reser:20;
} ctlx;
u32 ctl_hi;
};
/*DMA channel configuration registers*/
union intel_mid_dma_cfg_lo {
struct {
u32 reser1:5;
u32 ch_prior:3; /*channel priority = 0*/
u32 ch_susp:1; /*channel suspend = 0*/
u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
/*HW = 0, SW = 1*/
u32 hs_sel_src:1; /*select HW/SW src handshaking*/
u32 reser2:6;
u32 dst_hs_pol:1; /*dest HS interface polarity*/
u32 src_hs_pol:1; /*src HS interface polarity*/
u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/
u32 reload_src:1; /*auto reload src addr =1 if src is P*/
u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
} cfgx;
u32 cfg_lo;
};
union intel_mid_dma_cfg_hi {
struct {
u32 fcmode:1; /*flow control mode = 1*/
u32 fifo_mode:1; /*FIFO mode select = 1*/
u32 protctl:3; /*protection control = 0*/
u32 rsvd:2;
u32 src_per:4; /*src hw HS interface*/
u32 dst_per:4; /*dstn hw HS interface*/
u32 reser2:17;
} cfgx;
u32 cfg_hi;
};
/**
* struct intel_mid_dma_chan - internal mid representation of a DMA channel
* @chan: dma_chan structure representation for mid chan
* @ch_regs: MMIO register space pointer to channel register
* @dma_base: MMIO register space DMA engine base pointer
* @ch_id: DMA channel id
* @lock: channel spinlock
* @completed: DMA cookie
* @active_list: current active descriptors
* @queue: current queued up descriptors
* @free_list: current free descriptors
* @slave: dma slave structure
* @descs_allocated: total number of descriptors allocated
* @dma: dma device structure pointer
* @in_use: bool representing if ch is in use or not
*/
struct intel_mid_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
void __iomem *dma_base;
int ch_id;
spinlock_t lock;
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
struct intel_mid_dma_slave *slave;
unsigned int descs_allocated;
struct middma_device *dma;
bool in_use;
};
static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
struct dma_chan *chan)
{
return container_of(chan, struct intel_mid_dma_chan, chan);
}
/**
* struct middma_device - internal representation of a DMA device
* @pdev: PCI device
* @dma_base: MMIO register space pointer of DMA
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @tasklet: dma tasklet for processing interrupts
* @ch: per channel data
* @pci_id: DMA device PCI ID
* @intr_mask: Interrupt mask to be used
* @mask_reg: MMIO register for peripheral mask
* @chan_base: Base ch index (read from driver data)
* @max_chan: max number of chs supported (from drv_data)
* @block_size: Block size of DMA transfer supported (from drv_data)
* @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
*/
struct middma_device {
struct pci_dev *pdev;
void __iomem *dma_base;
struct pci_pool *dma_pool;
struct dma_device common;
struct tasklet_struct tasklet;
struct intel_mid_dma_chan ch[MAX_CHAN];
unsigned int pci_id;
unsigned int intr_mask;
void __iomem *mask_reg;
int chan_base;
int max_chan;
int block_size;
unsigned int pimr_mask;
};
static inline struct middma_device *to_middma_device(struct dma_device *common)
{
return container_of(common, struct middma_device, common);
}
struct intel_mid_dma_desc {
void __iomem *block; /*ch ptr*/
struct list_head desc_node;
struct dma_async_tx_descriptor txd;
size_t len;
dma_addr_t sar;
dma_addr_t dar;
u32 cfg_hi;
u32 cfg_lo;
u32 ctl_lo;
u32 ctl_hi;
dma_addr_t next;
enum dma_data_direction dirn;
enum dma_status status;
enum intel_mid_dma_width width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
};
static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
u32 en_reg = ioread32(dma + DMA_CHAN_EN);
return (en_reg >> ch_no) & 0x1;
}
static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct intel_mid_dma_desc, txd);
}
#endif /*__INTEL_MID_DMAC_REGS_H__*/
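The header's channel addressing comment (CH X REG = (DMA_CH_SIZE)*CH_NO + REG) pins down how per-channel registers are located, and test_ch_en() above shows the access pattern. A minimal sketch of the same idea (helper name hypothetical; reads only the low 32 bits of the register):

	/* Read the Source Address Register of channel ch_no: per-channel
	 * registers sit at dma_base + DMA_CH_SIZE * ch_no + REG. */
	static inline u32 read_ch_sar_lo(void __iomem *dma_base, u32 ch_no)
	{
		return ioread32(dma_base + DMA_CH_SIZE * ch_no + SAR);
	}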
@@ -97,6 +97,7 @@ struct ioat_chan_common {
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
 	#define IOAT_RESHAPE_PENDING 4
+	#define IOAT_RUN 5
 	struct timer_list timer;
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
...
@@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data)
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
 			__func__, chanerr);
-		BUG_ON(is_ioat_bug(chanerr));
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
 	}
 
 	/* if we haven't made progress and we have already
@@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
 	return ring;
 }
 
+void ioat2_free_chan_resources(struct dma_chan *c);
+
 /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
  * @chan: channel to be initialized
  */
@@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent **ring;
+	u64 status;
 	int order;
 
 	/* have we already been set up? */
@@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	tasklet_enable(&chan->cleanup_task);
 	ioat2_start_null_desc(ioat);
 
-	return 1 << ioat->alloc_order;
+	/* check that we got off the ground */
+	udelay(5);
+	status = ioat_chansts(chan);
+	if (is_ioat_active(status) || is_ioat_idle(status)) {
+		set_bit(IOAT_RUN, &chan->state);
+		return 1 << ioat->alloc_order;
+	} else {
+		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		dev_WARN(to_dev(chan),
+			 "failed to start channel chanerr: %#x\n", chanerr);
+		ioat2_free_chan_resources(c);
+		return -EFAULT;
+	}
 }
 
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
@@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	del_timer_sync(&chan->timer);
 	device->cleanup_fn((unsigned long) c);
 	device->reset_hw(chan);
+	clear_bit(IOAT_RUN, &chan->state);
 
 	spin_lock_bh(&chan->cleanup_lock);
 	spin_lock_bh(&ioat->prep_lock);
...
@@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data)
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
 			__func__, chanerr);
-		BUG_ON(is_ioat_bug(chanerr));
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
 	}
 
 	/* if we haven't made progress and we have already
...
This diff is collapsed.
This diff is collapsed.
@@ -315,11 +315,8 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
-	u32 next_lli_off_dst;
-	u32 next_lli_off_src;
-
-	next_lli_off_src = 0;
-	next_lli_off_dst = 0;
+	u32 next_lli_off_dst = 0;
+	u32 next_lli_off_src = 0;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
@@ -351,7 +348,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 				sg_dma_len(current_sg),
 				next_lli_off_src,
 				lcsp->lcsp1, src_data_width,
-				term_int && !next_lli_off_src,
+				false,
 				true);
 			d40_log_fill_lli(&lli->dst[i],
 				dev_addr,
@@ -375,7 +372,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 				sg_dma_len(current_sg),
 				next_lli_off_src,
 				lcsp->lcsp1, src_data_width,
-				term_int && !next_lli_off_src,
+				false,
 				false);
 		}
 	}
@@ -423,32 +420,35 @@ int d40_log_sg_to_lli(int lcla_id,
 	return total_size;
 }
 
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
 		       struct d40_log_lli *lcla_src,
 		       struct d40_log_lli *lcla_dst,
 		       struct d40_log_lli *lli_dst,
 		       struct d40_log_lli *lli_src,
 		       int llis_per_log)
 {
-	u32 slos = 0;
-	u32 dlos = 0;
+	u32 slos;
+	u32 dlos;
 	int i;
 
-	lcpa->lcsp0 = lli_src->lcsp02;
-	lcpa->lcsp1 = lli_src->lcsp13;
-	lcpa->lcsp2 = lli_dst->lcsp02;
-	lcpa->lcsp3 = lli_dst->lcsp13;
+	writel(lli_src->lcsp02, &lcpa->lcsp0);
+	writel(lli_src->lcsp13, &lcpa->lcsp1);
+	writel(lli_dst->lcsp02, &lcpa->lcsp2);
+	writel(lli_dst->lcsp13, &lcpa->lcsp3);
 
 	slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
 	dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 
 	for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
-		writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
-		writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
-		writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
-		writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+		writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
+		writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
+		writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
+		writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
 
-		slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-		dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+		slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+		dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 	}
+
+	return i;
 }
@@ -13,6 +13,9 @@
 #define D40_DREG_PCDELTA (8 * 4)
 #define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
 
+#define D40_LCPA_CHAN_SIZE 32
+#define D40_LCPA_CHAN_DST_DELTA 16
+
 #define D40_TYPE_TO_GROUP(type) (type / 16)
 #define D40_TYPE_TO_EVENT(type) (type % 16)
 
@@ -336,12 +339,12 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 		      bool term_int, dma_addr_t dev_addr, int max_len,
 		      int llis_per_log);
 
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
 		      struct d40_log_lli *lcla_src,
 		      struct d40_log_lli *lcla_dst,
 		      struct d40_log_lli *lli_dst,
 		      struct d40_log_lli *lli_src,
 		      int llis_per_log);
 
 int d40_log_sg_to_lli(int lcla_id,
 		      struct scatterlist *sg,
...
@@ -200,8 +200,8 @@ static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
 		return -EINVAL;
 	}
 
-	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
-		dma_desc, (void *)sg_dma_address(sg));
+	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
+		dma_desc, (unsigned long long)sg_dma_address(sg));
 
 	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
 	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
@@ -382,7 +382,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
 	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
 	if (!td_desc) {
 		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
-		goto err;
+		goto out;
 	}
 
 	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
@@ -410,7 +410,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
 err:
 	kfree(td_desc->desc_list);
 	kfree(td_desc);
-
+out:
 	return NULL;
 }
...
@@ -3030,6 +3030,34 @@ static void __init iommu_exit_mempool(void)
 
 }
 
+static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	u32 vtbar;
+	int rc;
+
+	/* We know that this device on this chipset has its own IOMMU.
+	 * If we find it under a different IOMMU, then the BIOS is lying
+	 * to us. Hope that the IOMMU for this device is actually
+	 * disabled, and it needs no translation...
+	 */
+	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+	if (rc) {
+		/* "can't" happen */
+		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+		return;
+	}
+	vtbar &= 0xffff0000;
+
+	/* we know that this iommu should be at offset 0xa000 from vtbar */
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
+			    TAINT_FIRMWARE_WORKAROUND,
+			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
+
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
...
@@ -114,11 +114,17 @@ enum dma_ctrl_flags {
 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
 * @DMA_PAUSE: pause ongoing transfers
 * @DMA_RESUME: resume paused transfer
+ * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
+ * that need to runtime reconfigure the slave channels (as opposed to passing
+ * configuration data in statically from the platform). An additional
+ * argument of struct dma_slave_config must be passed in with this
+ * command.
 */
 enum dma_ctrl_cmd {
 	DMA_TERMINATE_ALL,
 	DMA_PAUSE,
 	DMA_RESUME,
+	DMA_SLAVE_CONFIG,
 };
 
 /**
@@ -199,6 +205,71 @@ struct dma_chan_dev {
 	atomic_t *idr_ref;
 };
 
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
+ * legal values, DMA_BIDIRECTIONAL is not acceptable since we
+ * need to differentiate source and target addresses.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX), if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX), if the source is memory this argument
+ * is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 4, 8.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * command in the channel config interface, DMA_SLAVE_CONFIG
+ * and this struct will then be passed in as an argument to the
+ * DMA engine device_control() function.
+ *
+ * The rationale for adding configuration information to this struct
+ * is as follows: if it is likely that most DMA slave controllers in
+ * the world will support the configuration option, then make it
+ * generic. If not: if it is fixed so that it be sent in static from
+ * the platform data, then prefer to do that. Else, if it is neither
+ * fixed at runtime, nor generic enough (such as bus mastership on
+ * some CPU family and whatnot) then create a custom slave config
+ * struct and pass that, then make this config a member of that
+ * struct, if applicable.
+ */
+struct dma_slave_config {
+	enum dma_data_direction direction;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	enum dma_slave_buswidth src_addr_width;
+	enum dma_slave_buswidth dst_addr_width;
+	u32 src_maxburst;
+	u32 dst_maxburst;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
...
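At this stage there is no wrapper helper for the new command, so a client driver calls the channel's device_control() hook directly, exactly as coh901318_control() above expects to receive it. A minimal sketch, assuming a peripheral RX FIFO at fifo_addr (function and parameter names hypothetical):

	static int setup_rx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction      = DMA_FROM_DEVICE,
			.src_addr       = fifo_addr,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst   = 8,	/* e.g. half a 16-word FIFO */
		};

		/* The struct travels through the opaque arg parameter */
		return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
						    (unsigned long)&cfg);
	}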
/*
* intel_mid_dma.h - Intel MID DMA Drivers
*
* Copyright (C) 2008-10 Intel Corp
* Author: Vinod Koul <vinod.koul@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*
*/
#ifndef __INTEL_MID_DMA_H__
#define __INTEL_MID_DMA_H__
#include <linux/dmaengine.h>
/* DMA transaction width; src and dst width must be the same.
 * The DMA length must be width-aligned: for 32-bit width the
 * length must be 32-bit (4 byte) aligned. */
enum intel_mid_dma_width {
LNW_DMA_WIDTH_8BIT = 0x0,
LNW_DMA_WIDTH_16BIT = 0x1,
LNW_DMA_WIDTH_32BIT = 0x2,
};
/*DMA mode configurations*/
enum intel_mid_dma_mode {
	LNW_DMA_PER_TO_MEM = 0, /*peripheral to memory configuration*/
	LNW_DMA_MEM_TO_PER, /*memory to peripheral configuration*/
	LNW_DMA_MEM_TO_MEM, /*mem to mem config (testing only)*/
};
/*DMA handshaking*/
enum intel_mid_dma_hs_mode {
LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
};
/*Burst size configuration*/
enum intel_mid_dma_msize {
LNW_DMA_MSIZE_1 = 0x0,
LNW_DMA_MSIZE_4 = 0x1,
LNW_DMA_MSIZE_8 = 0x2,
LNW_DMA_MSIZE_16 = 0x3,
LNW_DMA_MSIZE_32 = 0x4,
LNW_DMA_MSIZE_64 = 0x5,
};
/**
* struct intel_mid_dma_slave - DMA slave structure
*
* @dirn: DMA trf direction
* @src_width: tx register width
* @dst_width: rx register width
* @hs_mode: HW/SW handshaking mode
* @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
* @src_msize: Source DMA burst size
* @dst_msize: Dst DMA burst size
* @device_instance: DMA peripheral device instance; we can have multiple
* peripheral devices connected to a single DMAC
*/
struct intel_mid_dma_slave {
enum dma_data_direction dirn;
enum intel_mid_dma_width src_width; /*width of DMA src txn*/
enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
	enum intel_mid_dma_msize src_msize; /*size of src burst*/
	enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
	unsigned int device_instance; /*0, 1 for peripheral instance*/
};
#endif /*__INTEL_MID_DMA_H__*/
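For illustration, a peripheral-to-memory slave on device instance 0 would fill the struct roughly as below. How the filled struct then reaches the driver (for example via dma_chan->private, as several drivers of this era did) is an assumption, not something this header defines:

	/* Hypothetical 32-bit, HW-handshaked, periph-to-mem slave config */
	static struct intel_mid_dma_slave mid_rx_slave = {
		.dirn            = DMA_FROM_DEVICE,
		.src_width       = LNW_DMA_WIDTH_32BIT,
		.dst_width       = LNW_DMA_WIDTH_32BIT,
		.hs_mode         = LNW_DMA_HW_HS,
		.cfg_mode        = LNW_DMA_PER_TO_MEM,
		.src_msize       = LNW_DMA_MSIZE_16,
		.dst_msize       = LNW_DMA_MSIZE_16,
		.device_instance = 0,
	};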
/*
* Copyright (c) 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef PCH_DMA_H
#define PCH_DMA_H
#include <linux/dmaengine.h>
enum pch_dma_width {
PCH_DMA_WIDTH_1_BYTE,
PCH_DMA_WIDTH_2_BYTES,
PCH_DMA_WIDTH_4_BYTES,
};
struct pch_dma_slave {
struct device *dma_dev;
unsigned int chan_id;
dma_addr_t tx_reg;
dma_addr_t rx_reg;
enum pch_dma_width width;
};
#endif
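A board file wiring a peripheral to the Topcliff PCH DMA controller would hand the driver one of these per channel; a minimal sketch with hypothetical device pointer and register addresses:

	/* Hypothetical slave description: channel 0 moving 4-byte units
	 * between a peripheral's TX/RX FIFO registers and memory. */
	static struct pch_dma_slave uart_dma_slave = {
		.dma_dev = &pch_dma_pdev->dev,	/* device owning the DMAC */
		.chan_id = 0,
		.tx_reg  = 0xdead0000,		/* placeholder FIFO addresses */
		.rx_reg  = 0xdead0004,
		.width   = PCH_DMA_WIDTH_4_BYTES,
	};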