Commit 277dc7ae

    Merge branch 'sa11x0-ir' into sa11x0

Authored on Mar 25, 2012 by Russell King
Parents: 374da9da, d138dacb

Showing 7 changed files with 1702 additions and 425 deletions (+1702 / -425):
    arch/arm/mach-sa1100/generic.c    +25    -0
    drivers/dma/Kconfig               +9     -0
    drivers/dma/Makefile              +1     -0
    drivers/dma/sa11x0-dma.c          +1109  -0
    drivers/net/irda/Kconfig          +1     -1
    drivers/net/irda/sa1100_ir.c      +533   -424
    include/linux/sa11x0-dma.h        +24    -0
arch/arm/mach-sa1100/generic.c

...
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/ioport.h>
...
@@ -289,6 +290,29 @@ static struct platform_device sa11x0rtc_device = {
 	.id		= -1,
 };
 
+static struct resource sa11x0dma_resources[] = {
+	DEFINE_RES_MEM(__PREG(DDAR(0)), 6 * DMASp),
+	DEFINE_RES_IRQ(IRQ_DMA0),
+	DEFINE_RES_IRQ(IRQ_DMA1),
+	DEFINE_RES_IRQ(IRQ_DMA2),
+	DEFINE_RES_IRQ(IRQ_DMA3),
+	DEFINE_RES_IRQ(IRQ_DMA4),
+	DEFINE_RES_IRQ(IRQ_DMA5),
+};
+
+static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device sa11x0dma_device = {
+	.name		= "sa11x0-dma",
+	.id		= -1,
+	.dev = {
+		.dma_mask	= &sa11x0dma_dma_mask,
+		.coherent_dma_mask = 0xffffffff,
+	},
+	.num_resources	= ARRAY_SIZE(sa11x0dma_resources),
+	.resource	= sa11x0dma_resources,
+};
+
 static struct platform_device *sa11x0_devices[] __initdata = {
 	&sa11x0udc_device,
 	&sa11x0uart1_device,
...
@@ -297,6 +321,7 @@ static struct platform_device *sa11x0_devices[] __initdata = {
 	&sa11x0pcmcia_device,
 	&sa11x0fb_device,
 	&sa11x0rtc_device,
+	&sa11x0dma_device,
 };
 
 static int __init sa1100_init(void)
...
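For reference, the DEFINE_RES_MEM()/DEFINE_RES_IRQ() helpers used in the new resource table are just shorthand for plain struct resource initializers. A sketch of roughly what the first two entries expand to (base and size stand in for __PREG(DDAR(0)) and 6 * DMASp; this is illustration, not part of the commit):

/* Rough expansion of the DEFINE_RES_* helpers above (sketch only). */
static struct resource sa11x0dma_resources_expanded[] = {
	{	/* DEFINE_RES_MEM(base, size) */
		.start	= base,
		.end	= base + size - 1,
		.flags	= IORESOURCE_MEM,
	},
	{	/* DEFINE_RES_IRQ(IRQ_DMA0) */
		.start	= IRQ_DMA0,
		.end	= IRQ_DMA0,
		.flags	= IORESOURCE_IRQ,
	},
};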
drivers/dma/Kconfig

...
@@ -252,6 +252,15 @@ config EP93XX_DMA
 	help
 	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
 
+config DMA_SA11X0
+	tristate "SA-11x0 DMA support"
+	depends on ARCH_SA1100
+	select DMA_ENGINE
+	help
+	  Support the DMA engine found on Intel StrongARM SA-1100 and
+	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
+	  devices.
+
 config DMA_ENGINE
 	bool
...
drivers/dma/Makefile

...
@@ -27,3 +27,4 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
drivers/dma/sa11x0-dma.c (new file, mode 100644)
/*
* SA11x0 DMAengine support
*
* Copyright (C) 2012 Russell King
* Derived in part from arch/arm/mach-sa1100/dma.c,
* Copyright (C) 2000, 2001 by Nicolas Pitre
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define NR_PHY_CHAN 6
#define DMA_ALIGN 3
#define DMA_MAX_SIZE 0x1fff
#define DMA_CHUNK_SIZE 0x1000
#define DMA_DDAR 0x00
#define DMA_DCSR_S 0x04
#define DMA_DCSR_C 0x08
#define DMA_DCSR_R 0x0c
#define DMA_DBSA 0x10
#define DMA_DBTA 0x14
#define DMA_DBSB 0x18
#define DMA_DBTB 0x1c
#define DMA_SIZE 0x20
#define DCSR_RUN (1 << 0)
#define DCSR_IE (1 << 1)
#define DCSR_ERROR (1 << 2)
#define DCSR_DONEA (1 << 3)
#define DCSR_STRTA (1 << 4)
#define DCSR_DONEB (1 << 5)
#define DCSR_STRTB (1 << 6)
#define DCSR_BIU (1 << 7)
#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr (0x0 << 4)
#define DDAR_Ser0UDCRc (0x1 << 4)
#define DDAR_Ser1SDLCTr (0x2 << 4)
#define DDAR_Ser1SDLCRc (0x3 << 4)
#define DDAR_Ser1UARTTr (0x4 << 4)
#define DDAR_Ser1UARTRc (0x5 << 4)
#define DDAR_Ser2ICPTr (0x6 << 4)
#define DDAR_Ser2ICPRc (0x7 << 4)
#define DDAR_Ser3UARTTr (0x8 << 4)
#define DDAR_Ser3UARTRc (0x9 << 4)
#define DDAR_Ser4MCP0Tr (0xa << 4)
#define DDAR_Ser4MCP0Rc (0xb << 4)
#define DDAR_Ser4MCP1Tr (0xc << 4)
#define DDAR_Ser4MCP1Rc (0xd << 4)
#define DDAR_Ser4SSPTr (0xe << 4)
#define DDAR_Ser4SSPRc (0xf << 4)
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct dma_async_tx_descriptor tx;
	u32			ddar;
	size_t			size;

	/* maybe protected by c->lock */
	struct list_head	node;
	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct dma_chan		chan;
	spinlock_t		lock;
	dma_cookie_t		lc;

	/* protected by c->lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;
	struct list_head	desc_submitted;
	struct list_head	desc_issued;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
#ifdef CONFIG_PM_SLEEP
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
#endif
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct list_head	desc_complete;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sa11x0_dma_desc, tx);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	if (list_empty(&c->desc_issued))
		return NULL;

	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, txd, txd->tx.cookie, txd->ddar);
}

static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

		/*
		 * We have reached the end of the current descriptor.
		 * Peek at the next descriptor, and if compatible with
		 * the current, start processing it.
		 */
		if (txn && txn->ddar == txd->ddar) {
			txd = txn;
			sa11x0_dma_start_desc(p, txn);
		} else {
			p->txd_load = NULL;
			return;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
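The A/B buffer selection above reduces to a small predicate: buffer A is loaded either when buffer B owns the bus interface unit and has been started, or when the engine is idle. A standalone user-space sketch (not driver code) exercising the same condition:

#include <stdio.h>

#define DCSR_STRTA	(1 << 4)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

/* Same test as in sa11x0_dma_start_sg(): should buffer A be loaded next? */
static int use_buffer_a(unsigned dcsr)
{
	return ((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	       ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0);
}

int main(void)
{
	printf("idle:             %d\n", use_buffer_a(0));		/* 1: A */
	printf("A started:        %d\n", use_buffer_a(DCSR_STRTA));	/* 0: B */
	printf("B started on BIU: %d\n",
	       use_buffer_a(DCSR_BIU | DCSR_STRTB));			/* 1: A */
	return 0;
}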
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		struct sa11x0_dma_dev *d = p->dev;

		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
			p->num, p->txd_done, p->txd_done->tx.cookie);

		c->lc = txd->tx.cookie;

		spin_lock(&d->lock);
		list_add_tail(&txd->node, &d->desc_complete);
		spin_unlock(&d->lock);

		p->sg_done = 0;
		p->txd_done = p->txd_load;

		tasklet_schedule(&d->task);
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	struct sa11x0_dma_desc *txd, *txn;
	LIST_HEAD(head);
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	/* Get the completed tx descriptors */
	spin_lock_irq(&d->lock);
	list_splice_init(&d->desc_complete, &head);
	spin_unlock_irq(&d->lock);

	list_for_each_entry(txd, &head, node) {
		c = to_sa11x0_dma_chan(txd->tx.chan);

		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
			c, txd, txd->tx.cookie);

		spin_lock_irq(&c->lock);
		p = c->phy;
		if (p) {
			if (!p->txd_done)
				sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->lock);
		}
	}

	/* Now free the completed tx descriptor, and call their callbacks */
	list_for_each_entry_safe(txd, txn, &head, node) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
			txd, txd->tx.cookie);

		kfree(txd);

		if (callback)
			callback(callback_param);
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
{
	struct sa11x0_dma_desc *txd, *txn;

	list_for_each_entry_safe(txd, txn, head, node) {
		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
		kfree(txd);
	}
}

static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->lock, flags);
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);
	list_splice_tail_init(&c->desc_submitted, &head);
	list_splice_tail_init(&c->desc_issued, &head);
	spin_unlock_irqrestore(&c->lock, flags);

	sa11x0_dma_desc_free(d, &head);
}

static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_desc *txd;
	dma_cookie_t last_used, last_complete;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	last_used = c->chan.cookie;
	last_complete = c->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(state, last_complete, last_used, 0);
		return ret;
	}

	spin_lock_irqsave(&c->lock, flags);
	p = c->phy;
	ret = c->status;
	if (p) {
		dma_addr_t addr = sa11x0_dma_pos(p);

		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

		txd = p->txd_done;
		if (txd) {
			unsigned i;

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		if (txd != p->txd_load && p->txd_load)
			bytes += p->txd_load->size;
	}
	list_for_each_entry(txd, &c->desc_issued, node) {
		bytes += txd->size;
	}
	spin_unlock_irqrestore(&c->lock, flags);

	dma_set_tx_state(state, last_complete, last_used, bytes);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
	if (!list_empty(&c->desc_issued)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			list_add_tail(&c->node, &d->chan_pending);
			tasklet_schedule(&d->task);
			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
	spin_unlock_irqrestore(&c->lock, flags);
}
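Together, tx_submit and issue_pending implement the standard two-stage dmaengine submission model. A hedged sketch of the client-side flow this driver plugs into (the channel is assumed to have been obtained with dma_request_channel() and the scatterlist mapped with dma_map_sg() beforehand; this is not part of the commit):

/* Client-side sketch, assuming chan/sg prepared elsewhere. */
static int example_start_transfer(struct dma_chan *chan,
				  struct scatterlist *sg, unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Reaches sa11x0_dma_prep_slave_sg() through the dma_device ops. */
	desc = chan->device->device_prep_slave_sg(chan, sg, nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);	/* -> sa11x0_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* -> sa11x0_dma_issue_pending() */
	return 0;
}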
static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->chan.cookie += 1;
	if (c->chan.cookie < 0)
		c->chan.cookie = 1;
	txd->tx.cookie = c->chan.cookie;

	list_add_tail(&txd->node, &c->desc_submitted);
	spin_unlock_irqrestore(&c->lock, flags);

	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
		c, txd, txd->tx.cookie);

	return txd->tx.cookie;
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			c, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				c, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
	txd->tx.flags = flags;
	txd->tx.tx_submit = sa11x0_dma_tx_submit;
	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
		c, txd, txd->size, txd->sglen);

	return &txd->tx;
}
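The splitting arithmetic above aims for roughly equal, alignment-preserving chunks rather than one maximal chunk plus a small tail. A standalone sketch (not driver code) of the computation for a hypothetical 20 KiB segment:

#include <stdio.h>

#define DMA_MAX_SIZE		0x1fff
#define DMA_ALIGN		3
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned len = 0x5000;	/* hypothetical 20 KiB segment */
	unsigned mult = DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN);
	unsigned tlen = (len / mult) & ~DMA_ALIGN;

	/* 3 chunks; the first is 0x1aa8 bytes, and the driver's do/while
	 * loop repeats this computation on the shrinking remainder. */
	printf("mult %u, first chunk 0x%x\n", mult, tlen);
	return 0;
}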
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		c, addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
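The final line packs the device address into the DDAR layout: the top nibble stays put and address bits 2..21 are shifted up by 6. A standalone sketch of the packing (0x80030014 is assumed here as a plausible Ser2 UART data-register address, purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x80030014;	/* assumed device FIFO address */
	uint32_t ddar = (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	printf("DDAR address bits: 0x%08x\n", ddar);	/* 0x80c00500 */
	return 0;
}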
static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->lock, flags);
		list_splice_tail_init(&c->desc_submitted, &head);
		list_splice_tail_init(&c->desc_issued, &head);

		p = c->phy;
		if (p) {
			struct sa11x0_dma_desc *txd, *txn;

			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
				DCSR_STRTA | DCSR_DONEA |
				DCSR_STRTB | DCSR_DONEB,
				p->base + DMA_DCSR_C);

			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
				if (txd->tx.chan == &c->chan)
					list_move(&txd->node, &head);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->lock, flags);
		sa11x0_dma_desc_free(d, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
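For clarity, the CD() macro both builds the DDAR device-selector value and stringizes the identifier into the channel's lookup name; one entry expands as:

/* CD(Ser0UDCRc, DDAR_RW) expands to: */
{ .ddar = DDAR_Ser0UDCRc | DDAR_RW, .name = "Ser0UDCRc" },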
static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->chan.device = dmadev;
		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		spin_lock_init(&c->lock);
		INIT_LIST_HEAD(&c->desc_submitted);
		INIT_LIST_HEAD(&c->desc_issued);
		INIT_LIST_HEAD(&c->node);
		list_add_tail(&c->chan.device_node, &dmadev->channels);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
		list_del(&c->chan.device_node);
		kfree(c);
	}
}

static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	INIT_LIST_HEAD(&d->desc_complete);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq	= sa11x0_dma_suspend,
	.resume_noirq	= sa11x0_dma_resume,
	.freeze_noirq	= sa11x0_dma_suspend,
	.thaw_noirq	= sa11x0_dma_resume,
	.poweroff_noirq	= sa11x0_dma_suspend,
	.restore_noirq	= sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= __devexit_p(sa11x0_dma_remove),
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");
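sa11x0_dma_filter_fn() is the hook by which on-chip peripheral drivers bind to one of the named channels in the table above. A minimal client sketch (kernel context assumed; the sa1100_ir driver below does exactly this):

#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

static struct dma_chan *example_get_icp_rx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Channel names come from the CD() table: "Ser2ICPRc" is ICP RX. */
	return dma_request_channel(mask, sa11x0_dma_filter_fn,
				   (void *)"Ser2ICPRc");
}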
drivers/net/irda/Kconfig

...
@@ -356,7 +356,7 @@ config VLSI_FIR
 config SA1100_FIR
 	tristate "SA1100 Internal IR"
-	depends on ARCH_SA1100 && IRDA
+	depends on ARCH_SA1100 && IRDA && DMA_SA11X0
 
 config VIA_FIR
 	tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
...
drivers/net/irda/sa1100_ir.c
@@ -15,7 +15,7 @@
  * This driver takes one kernel command line parameter, sa1100ir=, with
  * the following options:
  *	max_rate:baudrate	- set the maximum baud rate
- *	power_leve:level	- set the transmitter power level
+ *	power_level:level	- set the transmitter power level
  *	tx_lpm:0|1		- set transmit low power mode
  */
 #include <linux/module.h>
...
@@ -30,13 +30,13 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/wrapper.h>
 #include <net/irda/irda_device.h>
 
-#include <asm/irq.h>
-#include <mach/dma.h>
 #include <mach/hardware.h>
 #include <asm/mach/irda.h>
...
@@ -44,8 +44,15 @@ static int power_level = 3;
 static int tx_lpm;
 static int max_rate = 4000000;
 
+struct sa1100_buf {
+	struct device		*dev;
+	struct sk_buff		*skb;
+	struct scatterlist	sg;
+	struct dma_chan		*chan;
+	dma_cookie_t		cookie;
+};
+
 struct sa1100_irda {
-	unsigned char		hscr0;
 	unsigned char		utcr4;
 	unsigned char		power;
 	unsigned char		open;
...
@@ -53,12 +60,8 @@ struct sa1100_irda {
 	int			speed;
 	int			newspeed;
 
-	struct sk_buff		*txskb;
-	struct sk_buff		*rxskb;
-	dma_addr_t		txbuf_dma;
-	dma_addr_t		rxbuf_dma;
-	dma_regs_t		*txdma;
-	dma_regs_t		*rxdma;
+	struct sa1100_buf	dma_rx;
+	struct sa1100_buf	dma_tx;
 
 	struct device		*dev;
 	struct irda_platform_data *pdata;
...
@@ -67,23 +70,103 @@ struct sa1100_irda {
 	iobuff_t		tx_buff;
 	iobuff_t		rx_buff;
+
+	int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
+	irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
 };
 
+static int sa1100_irda_set_speed(struct sa1100_irda *, int);
+
 #define IS_FIR(si)		((si)->speed >= 4000000)
 
 #define HPSIR_MAX_RXLEN		2047
 
+static struct dma_slave_config sa1100_irda_sir_tx = {
+	.direction	= DMA_TO_DEVICE,
+	.dst_addr	= __PREG(Ser2UTDR),
+	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst	= 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+	.direction	= DMA_FROM_DEVICE,
+	.src_addr	= __PREG(Ser2HSDR),
+	.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.src_maxburst	= 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+	.direction	= DMA_TO_DEVICE,
+	.dst_addr	= __PREG(Ser2HSDR),
+	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst	= 8,
+};
+
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+	struct dma_chan *chan = buf->chan;
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, buf->cookie, &state);
+	if (status != DMA_PAUSED)
+		return 0;
+
+	return sg_dma_len(&buf->sg) - state.residue;
+}
+
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+	const char *name, struct dma_slave_config *cfg)
+{
+	dma_cap_mask_t m;
+	int ret;
+
+	dma_cap_zero(m);
+	dma_cap_set(DMA_SLAVE, m);
+
+	buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+	if (!buf->chan) {
+		dev_err(dev, "unable to request DMA channel for %s\n", name);
+		return -ENOENT;
+	}
+
+	ret = dmaengine_slave_config(buf->chan, cfg);
+	if (ret)
+		dev_warn(dev, "DMA slave_config for %s returned %d\n",
+			name, ret);
+
+	buf->dev = buf->chan->device->dev;
+
+	return 0;
+}
+
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+	enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan = buf->chan;
+
+	desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (desc) {
+		desc->callback = cb;
+		desc->callback_param = cb_p;
+		buf->cookie = dmaengine_submit(desc);
+		dma_async_issue_pending(chan);
+	}
+}
+
 /*
  * Allocate and map the receive buffer, unless it is already allocated.
  */
 static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
 {
-	if (si->rxskb)
+	if (si->dma_rx.skb)
 		return 0;
 
-	si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
-
-	if (!si->rxskb) {
+	si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
+	if (!si->dma_rx.skb) {
 		printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
 		return -ENOMEM;
 	}
...
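A hedged sketch of how a probe path might wire up the two IR channels with the sa1100_irda_dma_request() helper added above (channel names from the sa11x0-dma CD() table; error unwinding elided; this is illustration, not part of the commit):

static int example_request_ir_channels(struct sa1100_irda *si,
				       struct device *dev)
{
	int err;

	err = sa1100_irda_dma_request(dev, &si->dma_rx, "Ser2ICPRc",
				      &sa1100_irda_fir_rx);
	if (err)
		return err;

	return sa1100_irda_dma_request(dev, &si->dma_tx, "Ser2ICPTr",
				       &sa1100_irda_sir_tx);
}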
@@ -92,11 +175,14 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
...
@@ -92,11 +175,14 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
* Align any IP headers that may be contained
* Align any IP headers that may be contained
* within the frame.
* within the frame.
*/
*/
skb_reserve
(
si
->
rxskb
,
1
);
skb_reserve
(
si
->
dma_rx
.
skb
,
1
);
sg_set_buf
(
&
si
->
dma_rx
.
sg
,
si
->
dma_rx
.
skb
->
data
,
HPSIR_MAX_RXLEN
);
if
(
dma_map_sg
(
si
->
dma_rx
.
dev
,
&
si
->
dma_rx
.
sg
,
1
,
DMA_FROM_DEVICE
)
==
0
)
{
dev_kfree_skb_any
(
si
->
dma_rx
.
skb
);
return
-
ENOMEM
;
}
si
->
rxbuf_dma
=
dma_map_single
(
si
->
dev
,
si
->
rxskb
->
data
,
HPSIR_MAX_RXLEN
,
DMA_FROM_DEVICE
);
return
0
;
return
0
;
}
}
...
@@ -106,7 +192,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
...
@@ -106,7 +192,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
*/
*/
static
void
sa1100_irda_rx_dma_start
(
struct
sa1100_irda
*
si
)
static
void
sa1100_irda_rx_dma_start
(
struct
sa1100_irda
*
si
)
{
{
if
(
!
si
->
rx
skb
)
{
if
(
!
si
->
dma_rx
.
skb
)
{
printk
(
KERN_ERR
"sa1100_ir: rx buffer went missing
\n
"
);
printk
(
KERN_ERR
"sa1100_ir: rx buffer went missing
\n
"
);
return
;
return
;
}
}
...
@@ -114,254 +200,87 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
...
@@ -114,254 +200,87 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
/*
/*
* First empty receive FIFO
* First empty receive FIFO
*/
*/
Ser2HSCR0
=
si
->
hscr0
|
HSCR0_HSSP
;
Ser2HSCR0
=
HSCR0_HSSP
;
/*
/*
* Enable the DMA, receiver and receive interrupt.
* Enable the DMA, receiver and receive interrupt.
*/
*/
sa1100_clear_dma
(
si
->
rxdma
);
dmaengine_terminate_all
(
si
->
dma_rx
.
chan
);
sa1100_start_dma
(
si
->
rxdma
,
si
->
rxbuf_dma
,
HPSIR_MAX_RXLEN
);
sa1100_irda_dma_start
(
&
si
->
dma_rx
,
DMA_DEV_TO_MEM
,
NULL
,
NULL
);
Ser2HSCR0
=
si
->
hscr0
|
HSCR0_HSSP
|
HSCR0_RXE
;
Ser2HSCR0
=
HSCR0_HSSP
|
HSCR0_RXE
;
}
}
/*
static
void
sa1100_irda_check_speed
(
struct
sa1100_irda
*
si
)
* Set the IrDA communications speed.
*/
static
int
sa1100_irda_set_speed
(
struct
sa1100_irda
*
si
,
int
speed
)
{
{
unsigned
long
flags
;
if
(
si
->
newspeed
)
{
int
brd
,
ret
=
-
EINVAL
;
sa1100_irda_set_speed
(
si
,
si
->
newspeed
);
si
->
newspeed
=
0
;
switch
(
speed
)
{
case
9600
:
case
19200
:
case
38400
:
case
57600
:
case
115200
:
brd
=
3686400
/
(
16
*
speed
)
-
1
;
/*
* Stop the receive DMA.
*/
if
(
IS_FIR
(
si
))
sa1100_stop_dma
(
si
->
rxdma
);
local_irq_save
(
flags
);
Ser2UTCR3
=
0
;
Ser2HSCR0
=
HSCR0_UART
;
Ser2UTCR1
=
brd
>>
8
;
Ser2UTCR2
=
brd
;
/*
* Clear status register
*/
Ser2UTSR0
=
UTSR0_REB
|
UTSR0_RBB
|
UTSR0_RID
;
Ser2UTCR3
=
UTCR3_RIE
|
UTCR3_RXE
|
UTCR3_TXE
;
if
(
si
->
pdata
->
set_speed
)
si
->
pdata
->
set_speed
(
si
->
dev
,
speed
);
si
->
speed
=
speed
;
local_irq_restore
(
flags
);
ret
=
0
;
break
;
case
4000000
:
local_irq_save
(
flags
);
si
->
hscr0
=
0
;
Ser2HSSR0
=
0xff
;
Ser2HSCR0
=
si
->
hscr0
|
HSCR0_HSSP
;
Ser2UTCR3
=
0
;
si
->
speed
=
speed
;
if
(
si
->
pdata
->
set_speed
)
si
->
pdata
->
set_speed
(
si
->
dev
,
speed
);
sa1100_irda_rx_alloc
(
si
);
sa1100_irda_rx_dma_start
(
si
);
local_irq_restore
(
flags
);
break
;
default:
break
;
}
}
return
ret
;
}
}
/*
/*
* Control the power state of the IrDA transmitter.
* HP-SIR format support.
* State:
* 0 - off
* 1 - short range, lowest power
* 2 - medium range, medium power
* 3 - maximum range, high power
*
* Currently, only assabet is known to support this.
*/
*/
static
int
static
void
sa1100_irda_sirtxdma_irq
(
void
*
id
)
__sa1100_irda_set_power
(
struct
sa1100_irda
*
si
,
unsigned
int
state
)
{
int
ret
=
0
;
if
(
si
->
pdata
->
set_power
)
ret
=
si
->
pdata
->
set_power
(
si
->
dev
,
state
);
return
ret
;
}
static
inline
int
sa1100_set_power
(
struct
sa1100_irda
*
si
,
unsigned
int
state
)
{
int
ret
;
ret
=
__sa1100_irda_set_power
(
si
,
state
);
if
(
ret
==
0
)
si
->
power
=
state
;
return
ret
;
}
static
int
sa1100_irda_startup
(
struct
sa1100_irda
*
si
)
{
{
int
ret
;
struct
net_device
*
dev
=
id
;
struct
sa1100_irda
*
si
=
netdev_priv
(
dev
);
/*
dma_unmap_sg
(
si
->
dma_tx
.
dev
,
&
si
->
dma_tx
.
sg
,
1
,
DMA_TO_DEVICE
);
* Ensure that the ports for this device are setup correctly.
dev_kfree_skb
(
si
->
dma_tx
.
skb
);
*/
si
->
dma_tx
.
skb
=
NULL
;
if
(
si
->
pdata
->
startup
)
{
ret
=
si
->
pdata
->
startup
(
si
->
dev
);
if
(
ret
)
return
ret
;
}
/*
dev
->
stats
.
tx_packets
++
;
* Configure PPC for IRDA - we want to drive TXD2 low.
dev
->
stats
.
tx_bytes
+=
sg_dma_len
(
&
si
->
dma_tx
.
sg
);
* We also want to drive this pin low during sleep.
*/
PPSR
&=
~
PPC_TXD2
;
PSDR
&=
~
PPC_TXD2
;
PPDR
|=
PPC_TXD2
;
/*
/* We need to ensure that the transmitter has finished. */
* Enable HP-SIR modulation, and ensure that the port is disabled.
do
*/
rmb
();
Ser2UTCR3
=
0
;
while
(
Ser2UTSR1
&
UTSR1_TBY
);
Ser2HSCR0
=
HSCR0_UART
;
Ser2UTCR4
=
si
->
utcr4
;
Ser2UTCR0
=
UTCR0_8BitData
;
Ser2HSCR2
=
HSCR2_TrDataH
|
HSCR2_RcDataL
;
/*
/*
* Clear status register
* Ok, we've finished transmitting. Now enable the receiver.
* Sometimes we get a receive IRQ immediately after a transmit...
*/
*/
Ser2UTSR0
=
UTSR0_REB
|
UTSR0_RBB
|
UTSR0_RID
;
Ser2UTSR0
=
UTSR0_REB
|
UTSR0_RBB
|
UTSR0_RID
;
Ser2UTCR3
=
UTCR3_RIE
|
UTCR3_RXE
|
UTCR3_TXE
;
ret
=
sa1100_irda_set_speed
(
si
,
si
->
speed
=
9600
);
sa1100_irda_check_speed
(
si
);
if
(
ret
)
{
Ser2UTCR3
=
0
;
Ser2HSCR0
=
0
;
if
(
si
->
pdata
->
shutdown
)
si
->
pdata
->
shutdown
(
si
->
dev
);
}
return
ret
;
}
static
void
sa1100_irda_shutdown
(
struct
sa1100_irda
*
si
)
{
/*
* Stop all DMA activity.
*/
sa1100_stop_dma
(
si
->
rxdma
);
sa1100_stop_dma
(
si
->
txdma
);
/* Disable the port. */
Ser2UTCR3
=
0
;
Ser2HSCR0
=
0
;
if
(
si
->
pdata
->
shutdown
)
si
->
pdata
->
shutdown
(
si
->
dev
);
}
#ifdef CONFIG_PM
/*
* Suspend the IrDA interface.
*/
static
int
sa1100_irda_suspend
(
struct
platform_device
*
pdev
,
pm_message_t
state
)
{
struct
net_device
*
dev
=
platform_get_drvdata
(
pdev
);
struct
sa1100_irda
*
si
;
if
(
!
dev
)
return
0
;
si
=
netdev_priv
(
dev
);
/* I'm hungry! */
if
(
si
->
open
)
{
netif_wake_queue
(
dev
);
/*
* Stop the transmit queue
*/
netif_device_detach
(
dev
);
disable_irq
(
dev
->
irq
);
sa1100_irda_shutdown
(
si
);
__sa1100_irda_set_power
(
si
,
0
);
}
return
0
;
}
}
/*
static
int
sa1100_irda_sir_tx_start
(
struct
sk_buff
*
skb
,
struct
net_device
*
dev
,
* Resume the IrDA interface.
struct
sa1100_irda
*
si
)
*/
static
int
sa1100_irda_resume
(
struct
platform_device
*
pdev
)
{
{
struct
net_device
*
dev
=
platform_get_drvdata
(
pdev
);
si
->
tx_buff
.
data
=
si
->
tx_buff
.
head
;
struct
sa1100_irda
*
si
;
si
->
tx_buff
.
len
=
async_wrap_skb
(
skb
,
si
->
tx_buff
.
data
,
si
->
tx_buff
.
truesize
);
if
(
!
dev
)
return
0
;
si
=
netdev_priv
(
dev
);
si
->
dma_tx
.
skb
=
skb
;
if
(
si
->
open
)
{
sg_set_buf
(
&
si
->
dma_tx
.
sg
,
si
->
tx_buff
.
data
,
si
->
tx_buff
.
len
);
/*
if
(
dma_map_sg
(
si
->
dma_tx
.
dev
,
&
si
->
dma_tx
.
sg
,
1
,
DMA_TO_DEVICE
)
==
0
)
{
* If we missed a speed change, initialise at the new speed
si
->
dma_tx
.
skb
=
NULL
;
* directly. It is debatable whether this is actually
netif_wake_queue
(
dev
);
* required, but in the interests of continuing from where
dev
->
stats
.
tx_dropped
++
;
* we left off it is desirable. The converse argument is
return
NETDEV_TX_OK
;
* that we should re-negotiate at 9600 baud again.
*/
if
(
si
->
newspeed
)
{
si
->
speed
=
si
->
newspeed
;
si
->
newspeed
=
0
;
}
}
sa1100_irda_startup
(
si
);
sa1100_irda_dma_start
(
&
si
->
dma_tx
,
DMA_MEM_TO_DEV
,
sa1100_irda_sirtxdma_irq
,
dev
);
__sa1100_irda_set_power
(
si
,
si
->
power
);
enable_irq
(
dev
->
irq
);
/*
/*
* This automatically wakes up the queue
* The mean turn-around time is enforced by XBOF padding,
* so we don't have to do anything special here.
*/
*/
netif_device_attach
(
dev
);
Ser2UTCR3
=
UTCR3_TXE
;
}
return
0
;
return
NETDEV_TX_OK
;
}
}
#else
#define sa1100_irda_suspend NULL
#define sa1100_irda_resume NULL
#endif
/*
static
irqreturn_t
sa1100_irda_sir_irq
(
struct
net_device
*
dev
,
struct
sa1100_irda
*
si
)
* HP-SIR format interrupt service routines.
*/
static
void
sa1100_irda_hpsir_irq
(
struct
net_device
*
dev
)
{
{
struct
sa1100_irda
*
si
=
netdev_priv
(
dev
);
int
status
;
int
status
;
status
=
Ser2UTSR0
;
status
=
Ser2UTSR0
;
...
@@ -414,51 +333,96 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
...
@@ -414,51 +333,96 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
}
}
if
(
status
&
UTSR0_TFS
&&
si
->
tx_buff
.
len
)
{
return
IRQ_HANDLED
;
/*
}
* Transmitter FIFO is not full
*/
do
{
Ser2UTDR
=
*
si
->
tx_buff
.
data
++
;
si
->
tx_buff
.
len
-=
1
;
}
while
(
Ser2UTSR1
&
UTSR1_TNF
&&
si
->
tx_buff
.
len
);
if
(
si
->
tx_buff
.
len
==
0
)
{
/*
dev
->
stats
.
tx_packets
++
;
* FIR format support.
dev
->
stats
.
tx_bytes
+=
si
->
tx_buff
.
data
-
*/
si
->
tx_buff
.
head
;
static
void
sa1100_irda_firtxdma_irq
(
void
*
id
)
{
struct
net_device
*
dev
=
id
;
struct
sa1100_irda
*
si
=
netdev_priv
(
dev
);
struct
sk_buff
*
skb
;
/*
/*
* We need to ensure that the transmitter has
* Wait for the transmission to complete. Unfortunately,
* finished.
* the hardware doesn't give us an interrupt to indicate
* "end of frame".
*/
*/
do
do
rmb
();
rmb
();
while
(
Ser2UTSR1
&
UT
SR1_TBY
);
while
(
!
(
Ser2HSSR0
&
HSSR0_TUR
)
||
Ser2HSSR1
&
HS
SR1_TBY
);
/*
/*
* Ok, we've finished transmitting. Now enable
* Clear the transmit underrun bit.
* the receiver. Sometimes we get a receive IRQ
* immediately after a transmit...
*/
*/
Ser2UTSR0
=
UTSR0_REB
|
UTSR0_RBB
|
UTSR0_RID
;
Ser2HSSR0
=
HSSR0_TUR
;
Ser2UTCR3
=
UTCR3_RIE
|
UTCR3_RXE
|
UTCR3_TXE
;
if
(
si
->
newspeed
)
{
/*
sa1100_irda_set_speed
(
si
,
si
->
newspeed
);
* Do we need to change speed? Note that we're lazy
si
->
newspeed
=
0
;
* here - we don't free the old dma_rx.skb. We don't need
* to allocate a buffer either.
*/
sa1100_irda_check_speed
(
si
);
/*
* Start reception. This disables the transmitter for
* us. This will be using the existing RX buffer.
*/
sa1100_irda_rx_dma_start
(
si
);
/* Account and free the packet. */
skb
=
si
->
dma_tx
.
skb
;
if
(
skb
)
{
dma_unmap_sg
(
si
->
dma_tx
.
dev
,
&
si
->
dma_tx
.
sg
,
1
,
DMA_TO_DEVICE
);
dev
->
stats
.
tx_packets
++
;
dev
->
stats
.
tx_bytes
+=
skb
->
len
;
dev_kfree_skb_irq
(
skb
);
si
->
dma_tx
.
skb
=
NULL
;
}
}
/* I'm hungry! */
/*
* Make sure that the TX queue is available for sending
* (for retries). TX has priority over RX at all times.
*/
netif_wake_queue
(
dev
);
netif_wake_queue
(
dev
);
}
static
int
sa1100_irda_fir_tx_start
(
struct
sk_buff
*
skb
,
struct
net_device
*
dev
,
struct
sa1100_irda
*
si
)
{
int
mtt
=
irda_get_mtt
(
skb
);
si
->
dma_tx
.
skb
=
skb
;
sg_set_buf
(
&
si
->
dma_tx
.
sg
,
skb
->
data
,
skb
->
len
);
if
(
dma_map_sg
(
si
->
dma_tx
.
dev
,
&
si
->
dma_tx
.
sg
,
1
,
DMA_TO_DEVICE
)
==
0
)
{
si
->
dma_tx
.
skb
=
NULL
;
netif_wake_queue
(
dev
);
dev
->
stats
.
tx_dropped
++
;
dev_kfree_skb
(
skb
);
return
NETDEV_TX_OK
;
}
}
}
sa1100_irda_dma_start
(
&
si
->
dma_tx
,
DMA_MEM_TO_DEV
,
sa1100_irda_firtxdma_irq
,
dev
);
/*
* If we have a mean turn-around time, impose the specified
* specified delay. We could shorten this by timing from
* the point we received the packet.
*/
if
(
mtt
)
udelay
(
mtt
);
Ser2HSCR0
=
HSCR0_HSSP
|
HSCR0_TXE
;
return
NETDEV_TX_OK
;
}
}
static
void
sa1100_irda_fir_error
(
struct
sa1100_irda
*
si
,
struct
net_device
*
dev
)
static
void
sa1100_irda_fir_error
(
struct
sa1100_irda
*
si
,
struct
net_device
*
dev
)
{
{
struct
sk_buff
*
skb
=
si
->
rxskb
;
struct
sk_buff
*
skb
=
si
->
dma_rx
.
skb
;
dma_addr_t
dma_addr
;
unsigned
int
len
,
stat
,
data
;
unsigned
int
len
,
stat
,
data
;
if
(
!
skb
)
{
if
(
!
skb
)
{
...
@@ -469,11 +433,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
...
@@ -469,11 +433,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
/*
/*
* Get the current data position.
* Get the current data position.
*/
*/
dma_addr
=
sa1100_get_dma_pos
(
si
->
rxdma
);
len
=
sa1100_irda_dma_xferred
(
&
si
->
dma_rx
);
len
=
dma_addr
-
si
->
rxbuf_dma
;
if
(
len
>
HPSIR_MAX_RXLEN
)
if
(
len
>
HPSIR_MAX_RXLEN
)
len
=
HPSIR_MAX_RXLEN
;
len
=
HPSIR_MAX_RXLEN
;
dma_unmap_s
ingle
(
si
->
dev
,
si
->
rxbuf_dma
,
len
,
DMA_FROM_DEVICE
);
dma_unmap_s
g
(
si
->
dma_rx
.
dev
,
&
si
->
dma_rx
.
sg
,
1
,
DMA_FROM_DEVICE
);
do
{
do
{
/*
/*
...
@@ -501,7 +464,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
...
@@ -501,7 +464,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
}
while
(
Ser2HSSR0
&
HSSR0_EIF
);
}
while
(
Ser2HSSR0
&
HSSR0_EIF
);
if
(
stat
&
HSSR1_EOF
)
{
if
(
stat
&
HSSR1_EOF
)
{
si
->
rx
skb
=
NULL
;
si
->
dma_rx
.
skb
=
NULL
;
skb_put
(
skb
,
len
);
skb_put
(
skb
,
len
);
skb
->
dev
=
dev
;
skb
->
dev
=
dev
;
...
@@ -518,28 +481,23 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
...
@@ -518,28 +481,23 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
netif_rx
(
skb
);
netif_rx
(
skb
);
}
else
{
}
else
{
/*
/*
* Remap the buffer.
* Remap the buffer - it was previously mapped, and we
* hope that this succeeds.
*/
*/
si
->
rxbuf_dma
=
dma_map_single
(
si
->
dev
,
si
->
rxskb
->
data
,
dma_map_sg
(
si
->
dma_rx
.
dev
,
&
si
->
dma_rx
.
sg
,
1
,
DMA_FROM_DEVICE
);
HPSIR_MAX_RXLEN
,
DMA_FROM_DEVICE
);
}
}
}
}
/*
/*
* FIR format interrupt service routine. We only have to
* We only have to handle RX events here; transmit events go via the TX
* handle RX events; transmit events go via the TX DMA handler.
* DMA handler. We disable RX, process, and the restart RX.
*
* No matter what, we disable RX, process, and the restart RX.
*/
*/
static
void
sa1100_irda_fir_irq
(
struct
net_device
*
dev
)
static
irqreturn_t
sa1100_irda_fir_irq
(
struct
net_device
*
dev
,
struct
sa1100_irda
*
si
)
{
{
struct
sa1100_irda
*
si
=
netdev_priv
(
dev
);
/*
/*
* Stop RX DMA
* Stop RX DMA
*/
*/
sa1100_stop_dma
(
si
->
rxdma
);
dmaengine_pause
(
si
->
dma_rx
.
chan
);
/*
/*
* Framing error - we throw away the packet completely.
* Framing error - we throw away the packet completely.
...
@@ -555,7 +513,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
...
@@ -555,7 +513,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
/*
/*
* Clear out the DMA...
* Clear out the DMA...
*/
*/
Ser2HSCR0
=
si
->
hscr0
|
HSCR0_HSSP
;
Ser2HSCR0
=
HSCR0_HSSP
;
/*
/*
* Clear selected status bits now, so we
* Clear selected status bits now, so we
...
@@ -573,78 +531,128 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
...
@@ -573,78 +531,128 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
if
(
Ser2HSSR0
&
HSSR0_EIF
)
if
(
Ser2HSSR0
&
HSSR0_EIF
)
sa1100_irda_fir_error
(
si
,
dev
);
sa1100_irda_fir_error
(
si
,
dev
);
/*
/*
* No matter what happens, we must restart reception.
* No matter what happens, we must restart reception.
*/
*/
sa1100_irda_rx_dma_start
(
si
);
sa1100_irda_rx_dma_start
(
si
);
return
IRQ_HANDLED
;
}
/*
* Set the IrDA communications speed.
*/
static
int
sa1100_irda_set_speed
(
struct
sa1100_irda
*
si
,
int
speed
)
{
unsigned
long
flags
;
int
brd
,
ret
=
-
EINVAL
;
switch
(
speed
)
{
case
9600
:
case
19200
:
case
38400
:
case
57600
:
case
115200
:
brd
=
3686400
/
(
16
*
speed
)
-
1
;
/* Stop the receive DMA, and configure transmit. */
if
(
IS_FIR
(
si
))
{
dmaengine_terminate_all
(
si
->
dma_rx
.
chan
);
dmaengine_slave_config
(
si
->
dma_tx
.
chan
,
&
sa1100_irda_sir_tx
);
}
local_irq_save
(
flags
);
Ser2UTCR3
=
0
;
Ser2HSCR0
=
HSCR0_UART
;
Ser2UTCR1
=
brd
>>
8
;
Ser2UTCR2
=
brd
;
/*
* Clear status register
*/
Ser2UTSR0
=
UTSR0_REB
|
UTSR0_RBB
|
UTSR0_RID
;
Ser2UTCR3
=
UTCR3_RIE
|
UTCR3_RXE
|
UTCR3_TXE
;
if
(
si
->
pdata
->
set_speed
)
si
->
pdata
->
set_speed
(
si
->
dev
,
speed
);
si
->
speed
=
speed
;
si
->
tx_start
=
sa1100_irda_sir_tx_start
;
si
->
irq
=
sa1100_irda_sir_irq
;
local_irq_restore
(
flags
);
ret
=
0
;
break
;
case
4000000
:
if
(
!
IS_FIR
(
si
))
dmaengine_slave_config
(
si
->
dma_tx
.
chan
,
&
sa1100_irda_fir_tx
);
local_irq_save
(
flags
);
Ser2HSSR0
=
0xff
;
Ser2HSCR0
=
HSCR0_HSSP
;
Ser2UTCR3
=
0
;
si
->
speed
=
speed
;
si
->
tx_start
=
sa1100_irda_fir_tx_start
;
si
->
irq
=
sa1100_irda_fir_irq
;
if
(
si
->
pdata
->
set_speed
)
si
->
pdata
->
set_speed
(
si
->
dev
,
speed
);
sa1100_irda_rx_alloc
(
si
);
sa1100_irda_rx_dma_start
(
si
);
local_irq_restore
(
flags
);
break
;
default:
break
;
}
return
ret
;
}
/*
* Control the power state of the IrDA transmitter.
* State:
* 0 - off
* 1 - short range, lowest power
* 2 - medium range, medium power
* 3 - maximum range, high power
*
* Currently, only assabet is known to support this.
*/
static
int
__sa1100_irda_set_power
(
struct
sa1100_irda
*
si
,
unsigned
int
state
)
{
int
ret
=
0
;
if
(
si
->
pdata
->
set_power
)
ret
=
si
->
pdata
->
set_power
(
si
->
dev
,
state
);
return
ret
;
}
static
inline
int
sa1100_set_power
(
struct
sa1100_irda
*
si
,
unsigned
int
state
)
{
int
ret
;
ret
=
__sa1100_irda_set_power
(
si
,
state
);
if
(
ret
==
0
)
si
->
power
=
state
;
return
ret
;
}

 static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
+	struct sa1100_irda *si = netdev_priv(dev);

-	if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev))))
-		sa1100_irda_fir_irq(dev);
-	else
-		sa1100_irda_hpsir_irq(dev);
-	return IRQ_HANDLED;
-}
-
-/*
- * TX DMA completion handler.
- */
-static void sa1100_irda_txdma_irq(void *id)
-{
-	struct net_device *dev = id;
-	struct sa1100_irda *si = netdev_priv(dev);
-	struct sk_buff *skb = si->txskb;
-
-	si->txskb = NULL;
-
-	/*
-	 * Wait for the transmission to complete.  Unfortunately,
-	 * the hardware doesn't give us an interrupt to indicate
-	 * "end of frame".
-	 */
-	do
-		rmb();
-	while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
-
-	/*
-	 * Clear the transmit underrun bit.
-	 */
-	Ser2HSSR0 = HSSR0_TUR;
-
-	/*
-	 * Do we need to change speed?  Note that we're lazy
-	 * here - we don't free the old rxskb.  We don't need
-	 * to allocate a buffer either.
-	 */
-	if (si->newspeed) {
-		sa1100_irda_set_speed(si, si->newspeed);
-		si->newspeed = 0;
-	}
-
-	/*
-	 * Start reception.  This disables the transmitter for
-	 * us.  This will be using the existing RX buffer.
-	 */
-	sa1100_irda_rx_dma_start(si);
-
-	/*
-	 * Account and free the packet.
-	 */
-	if (skb) {
-		dma_unmap_single(si->dev, si->txbuf_dma, skb->len,
-				 DMA_TO_DEVICE);
-		dev->stats.tx_packets++;
-		dev->stats.tx_bytes += skb->len;
-		dev_kfree_skb_irq(skb);
-	}
-
-	/*
-	 * Make sure that the TX queue is available for sending
-	 * (for retries).  TX has priority over RX at all times.
-	 */
-	netif_wake_queue(dev);
+	return si->irq(dev, si);
 }
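Note the pattern this hunk introduces: instead of testing IS_FIR() on every interrupt, sa1100_irda_set_speed() installs per-mode handlers in si->irq and si->tx_start, and the shared entry points just call through the pointers. A minimal self-contained sketch of that dispatch style (hypothetical names, not the driver's actual declarations):

/* Sketch of the per-mode dispatch pattern: the mode-change path
 * installs the handler once, so the hot path needs no mode test.
 * All names here are illustrative.
 */
struct port {
	int (*irq)(struct port *p);	/* handler for the current mode */
};

static int sir_irq(struct port *p) { return 0; /* SIR-specific work */ }
static int fir_irq(struct port *p) { return 0; /* FIR-specific work */ }

static void enter_sir_mode(struct port *p) { p->irq = sir_irq; }
static void enter_fir_mode(struct port *p) { p->irq = fir_irq; }

static int port_interrupt(struct port *p)
{
	return p->irq(p);	/* dispatch without testing the mode */
}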
 static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
...
@@ -660,62 +668,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (speed != si->speed && speed != -1)
 		si->newspeed = speed;

-	/*
-	 * If this is an empty frame, we can bypass a lot.
-	 */
+	/* If this is an empty frame, we can bypass a lot. */
 	if (skb->len == 0) {
-		if (si->newspeed) {
-			si->newspeed = 0;
-			sa1100_irda_set_speed(si, speed);
-		}
+		sa1100_irda_check_speed(si);
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}

-	if (!IS_FIR(si)) {
-		netif_stop_queue(dev);
-
-		si->tx_buff.data = si->tx_buff.head;
-		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data,
-						  si->tx_buff.truesize);
-
-		/*
-		 * Set the transmit interrupt enable.  This will fire
-		 * off an interrupt immediately.  Note that we disable
-		 * the receiver so we won't get spurious characters
-		 * received.
-		 */
-		Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
-
-		dev_kfree_skb(skb);
-	} else {
-		int mtt = irda_get_mtt(skb);
-
-		/*
-		 * We must not be transmitting...
-		 */
-		BUG_ON(si->txskb);
-
-		netif_stop_queue(dev);
-
-		si->txskb = skb;
-		si->txbuf_dma = dma_map_single(si->dev, skb->data,
-					       skb->len, DMA_TO_DEVICE);
-
-		sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);
-
-		/*
-		 * If we have a mean turn-around time, impose the specified
-		 * delay.  We could shorten this by timing from the point we
-		 * received the packet.
-		 */
-		if (mtt)
-			udelay(mtt);
-
-		Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
-	}
+	netif_stop_queue(dev);

-	return NETDEV_TX_OK;
+	/* We must not already have a skb to transmit... */
+	BUG_ON(si->dma_tx.skb);
+
+	return si->tx_start(skb, dev, si);
 }
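The empty-frame path now defers to sa1100_irda_check_speed(), whose body lies outside this excerpt. Judging from the inline code it replaces, it presumably looks like the following (an assumption, not part of this diff):

/* Presumed shape of sa1100_irda_check_speed() (not shown in this
 * excerpt): apply a queued speed-change request, mirroring the
 * inline code this hunk removes.
 */
static void sa1100_irda_check_speed(struct sa1100_irda *si)
{
	if (si->newspeed) {
		sa1100_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	}
}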
 static int
...
@@ -762,6 +727,69 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
 	return ret;
 }
+
+static int sa1100_irda_startup(struct sa1100_irda *si)
+{
+	int ret;
+
+	/*
+	 * Ensure that the ports for this device are setup correctly.
+	 */
+	if (si->pdata->startup) {
+		ret = si->pdata->startup(si->dev);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Configure PPC for IRDA - we want to drive TXD2 low.
+	 * We also want to drive this pin low during sleep.
+	 */
+	PPSR &= ~PPC_TXD2;
+	PSDR &= ~PPC_TXD2;
+	PPDR |= PPC_TXD2;
+
+	/*
+	 * Enable HP-SIR modulation, and ensure that the port is disabled.
+	 */
+	Ser2UTCR3 = 0;
+	Ser2HSCR0 = HSCR0_UART;
+	Ser2UTCR4 = si->utcr4;
+	Ser2UTCR0 = UTCR0_8BitData;
+	Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+
+	/*
+	 * Clear status register
+	 */
+	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+
+	ret = sa1100_irda_set_speed(si, si->speed = 9600);
+	if (ret) {
+		Ser2UTCR3 = 0;
+		Ser2HSCR0 = 0;
+
+		if (si->pdata->shutdown)
+			si->pdata->shutdown(si->dev);
+	}
+
+	return ret;
+}
+
+static void sa1100_irda_shutdown(struct sa1100_irda *si)
+{
+	/*
+	 * Stop all DMA activity.
+	 */
+	dmaengine_terminate_all(si->dma_rx.chan);
+	dmaengine_terminate_all(si->dma_tx.chan);
+
+	/* Disable the port. */
+	Ser2UTCR3 = 0;
+	Ser2HSCR0 = 0;
+
+	if (si->pdata->shutdown)
+		si->pdata->shutdown(si->dev);
+}

 static int sa1100_irda_start(struct net_device *dev)
 {
 	struct sa1100_irda *si = netdev_priv(dev);
...
@@ -769,25 +797,16 @@ static int sa1100_irda_start(struct net_device *dev)
 	si->speed = 9600;

-	err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
-	if (err)
-		goto err_irq;
-
-	err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
-				 NULL, NULL, &si->rxdma);
+	err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
+				      &sa1100_irda_fir_rx);
 	if (err)
 		goto err_rx_dma;

-	err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
-				 sa1100_irda_txdma_irq, dev, &si->txdma);
+	err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
+				      &sa1100_irda_sir_tx);
 	if (err)
 		goto err_tx_dma;

-	/*
-	 * The interrupt must remain disabled for now.
-	 */
-	disable_irq(dev->irq);
-
 	/*
 	 * Setup the serial port for the specified speed.
 	 */
...
@@ -803,44 +822,60 @@ static int sa1100_irda_start(struct net_device *dev)
 	if (!si->irlap)
 		goto err_irlap;

+	err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
+	if (err)
+		goto err_irq;
+
 	/*
 	 * Now enable the interrupt and start the queue
 	 */
 	si->open = 1;
 	sa1100_set_power(si, power_level); /* low power mode */
-	enable_irq(dev->irq);
 	netif_start_queue(dev);
 	return 0;

+err_irq:
+	irlap_close(si->irlap);
 err_irlap:
 	si->open = 0;
 	sa1100_irda_shutdown(si);
 err_startup:
-	sa1100_free_dma(si->txdma);
+	dma_release_channel(si->dma_tx.chan);
 err_tx_dma:
-	sa1100_free_dma(si->rxdma);
+	dma_release_channel(si->dma_rx.chan);
 err_rx_dma:
-	free_irq(dev->irq, dev);
-err_irq:
 	return err;
 }
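sa1100_irda_dma_request() is defined outside this excerpt. Based on the calls above and the sa11x0-dma header added at the end of this commit, a helper of roughly this shape would do the job; this is a sketch under those assumptions, and the name request_slave_chan is illustrative:

/* Requires <linux/dmaengine.h> and <linux/sa11x0-dma.h>. */
static int request_slave_chan(struct device *dev, struct dma_chan **pchan,
	const char *name, struct dma_slave_config *cfg)
{
	dma_cap_mask_t m;

	dma_cap_zero(m);
	dma_cap_set(DMA_SLAVE, m);

	/* the filter matches the named SA11x0 request line, e.g. "Ser2ICPRc" */
	*pchan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
	if (!*pchan) {
		dev_err(dev, "no DMA channel for %s\n", name);
		return -EBUSY;
	}

	return dmaengine_slave_config(*pchan, cfg);
}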

 static int sa1100_irda_stop(struct net_device *dev)
 {
 	struct sa1100_irda *si = netdev_priv(dev);
+	struct sk_buff *skb;
+
+	disable_irq(dev->irq);
+
+	netif_stop_queue(dev);
+
+	si->open = 0;
 	sa1100_irda_shutdown(si);

 	/*
-	 * If we have been doing DMA receive, make sure we
+	 * If we have been doing any DMA activity, make sure we
 	 * tidy that up cleanly.
 	 */
-	if (si->rxskb) {
-		dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb(si->rxskb);
-		si->rxskb = NULL;
+	skb = si->dma_rx.skb;
+	if (skb) {
+		dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
+			     DMA_FROM_DEVICE);
+		dev_kfree_skb(skb);
+		si->dma_rx.skb = NULL;
+	}
+
+	skb = si->dma_tx.skb;
+	if (skb) {
+		dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+			     DMA_TO_DEVICE);
+		dev_kfree_skb(skb);
+		si->dma_tx.skb = NULL;
 	}

 	/* Stop IrLAP */
...
@@ -849,14 +884,11 @@ static int sa1100_irda_stop(struct net_device *dev)
 		si->irlap = NULL;
 	}

-	netif_stop_queue(dev);
-	si->open = 0;
-
 	/*
 	 * Free resources
 	 */
-	sa1100_free_dma(si->txdma);
-	sa1100_free_dma(si->rxdma);
+	dma_release_channel(si->dma_tx.chan);
+	dma_release_channel(si->dma_rx.chan);
 	free_irq(dev->irq, dev);

 	sa1100_set_power(si, 0);
...
@@ -888,11 +920,15 @@ static int sa1100_irda_probe(struct platform_device *pdev)
 	struct net_device *dev;
 	struct sa1100_irda *si;
 	unsigned int baudrate_mask;
-	int err;
+	int err, irq;

 	if (!pdev->dev.platform_data)
 		return -EINVAL;

+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return irq < 0 ? irq : -ENXIO;
+
 	err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
 	if (err)
 		goto err_mem_1;
...
@@ -907,22 +943,27 @@ static int sa1100_irda_probe(struct platform_device *pdev)
 	if (!dev)
 		goto err_mem_4;

+	SET_NETDEV_DEV(dev, &pdev->dev);
+
 	si = netdev_priv(dev);
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;

+	sg_init_table(&si->dma_rx.sg, 1);
+	sg_init_table(&si->dma_tx.sg, 1);
+
 	/*
 	 * Initialise the HP-SIR buffers
 	 */
 	err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
 	if (err)
 		goto err_mem_5;
-	err = sa1100_irda_init_iobuf(&si->tx_buff, 4000);
+	err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
 	if (err)
 		goto err_mem_5;

 	dev->netdev_ops = &sa1100_irda_netdev_ops;
-	dev->irq = IRQ_Ser2ICP;
+	dev->irq = irq;

 	irda_init_max_qos_capabilies(&si->qos);
...
@@ -996,6 +1037,74 @@ static int sa1100_irda_remove(struct platform_device *pdev)
 	return 0;
 }

+#ifdef CONFIG_PM
+/*
+ * Suspend the IrDA interface.
+ */
+static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct sa1100_irda *si;
+
+	if (!dev)
+		return 0;
+
+	si = netdev_priv(dev);
+	if (si->open) {
+		/*
+		 * Stop the transmit queue
+		 */
+		netif_device_detach(dev);
+		disable_irq(dev->irq);
+		sa1100_irda_shutdown(si);
+		__sa1100_irda_set_power(si, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * Resume the IrDA interface.
+ */
+static int sa1100_irda_resume(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct sa1100_irda *si;
+
+	if (!dev)
+		return 0;
+
+	si = netdev_priv(dev);
+	if (si->open) {
+		/*
+		 * If we missed a speed change, initialise at the new speed
+		 * directly.  It is debatable whether this is actually
+		 * required, but in the interests of continuing from where
+		 * we left off it is desirable.  The converse argument is
+		 * that we should re-negotiate at 9600 baud again.
+		 */
+		if (si->newspeed) {
+			si->speed = si->newspeed;
+			si->newspeed = 0;
+		}
+
+		sa1100_irda_startup(si);
+		__sa1100_irda_set_power(si, si->power);
+		enable_irq(dev->irq);
+
+		/*
+		 * This automatically wakes up the queue
+		 */
+		netif_device_attach(dev);
+	}
+
+	return 0;
+}
+#else
+#define sa1100_irda_suspend	NULL
+#define sa1100_irda_resume	NULL
+#endif

 static struct platform_driver sa1100ir_driver = {
 	.probe		= sa1100_irda_probe,
 	.remove		= sa1100_irda_remove,
...
include/linux/sa11x0-dma.h
0 → 100644
/*
 * SA11x0 DMA Engine support
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_SA11X0_DMA_H
#define __LINUX_SA11X0_DMA_H

struct dma_chan;

#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE)
bool sa11x0_dma_filter_fn(struct dma_chan *, void *);
#else
static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d)
{
	return false;
}
#endif

#endif
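Any on-chip peripheral driver can hand this filter to the generic dmaengine channel allocator; the filter data is the name of the SA11x0 DMA request line. A minimal consumer sketch (hypothetical helper names; "Ser2ICPRc" is the IrDA receive line used earlier in this commit):

/* Consumer-side sketch: request a channel backed by a specific
 * SA11x0 request line, then release it when done.
 */
#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

static struct dma_chan *example_get(const char *line)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* e.g. line = "Ser2ICPRc" */
	return dma_request_channel(mask, sa11x0_dma_filter_fn, (void *)line);
}

static void example_put(struct dma_chan *chan)
{
	if (chan)
		dma_release_channel(chan);
}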