Commit d762f438 authored by Linus Torvalds

Merge branch 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6

* 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (23 commits)
  sh: Ignore R_SH_NONE module relocations.
  SH: SE7751: Fix pcibios_map_platform_irq prototype.
  sh: remove warning and warning_symbol from struct stacktrace_ops
  sh: wire up sys_sendmmsg.
  clocksource: sh_tmu: Runtime PM support
  clocksource: sh_tmu: __clocksource_updatefreq_hz() update
  clocksource: sh_cmt: Runtime PM support
  clocksource: sh_cmt: __clocksource_updatefreq_hz() update
  dmaengine: shdma: synchronize RCU before freeing, simplify spinlock
  dmaengine: shdma: add runtime- and system-level power management
  dmaengine: shdma: fix locking
  sh: sh-sci: sh7377 and sh73a0 build fixes
  sh: cosmetic improvement: use an existing pointer
  serial: sh-sci: suspend/resume wakeup support V2
  serial: sh-sci: Runtime PM support
  sh: select IRQ_FORCED_THREADING.
  sh: intc: Set virtual IRQs as nothread.
  sh: fixup fpu.o compile order
  i2c: add a module alias to the sh-mobile driver
  ALSA: add a module alias to the FSI driver
  ...
......@@ -21,6 +21,7 @@ config SUPERH
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_FORCED_THREADING
select RTC_LIB
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
......
......@@ -482,7 +482,7 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0,
};
#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* SDHI0 */
static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{
......@@ -522,7 +522,7 @@ static struct platform_device sdhi0_device = {
},
};
#if !defined(CONFIG_MMC_SH_MMCIF)
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SDHI1 */
static void sdhi1_set_pwr(struct platform_device *pdev, int state)
{
......@@ -836,7 +836,7 @@ static struct platform_device vou_device = {
},
};
#if defined(CONFIG_MMC_SH_MMCIF)
#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SH_MMCIF */
static void mmcif_set_pwr(struct platform_device *pdev, int state)
{
......@@ -898,9 +898,9 @@ static struct platform_device *ecovec_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF)
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
&sdhi1_device,
#endif
#else
......@@ -912,7 +912,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
&fsi_device,
&irda_device,
&vou_device,
#if defined(CONFIG_MMC_SH_MMCIF)
#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
&sh_mmcif_device,
#endif
};
......@@ -1180,7 +1180,7 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL);
......@@ -1193,7 +1193,7 @@ static int __init arch_setup(void)
gpio_request(GPIO_PTB6, NULL);
gpio_direction_output(GPIO_PTB6, 0);
#if !defined(CONFIG_MMC_SH_MMCIF)
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
gpio_request(GPIO_FN_SDHI1CD, NULL);
gpio_request(GPIO_FN_SDHI1WP, NULL);
......@@ -1284,7 +1284,7 @@ static int __init arch_setup(void)
gpio_request(GPIO_PTU5, NULL);
gpio_direction_output(GPIO_PTU5, 0);
#if defined(CONFIG_MMC_SH_MMCIF)
#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
gpio_request(GPIO_FN_MMC_D7, NULL);
gpio_request(GPIO_FN_MMC_D6, NULL);
......
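Several of the board hunks above replace single-symbol checks with the built-in-or-module form. For a tristate Kconfig symbol, `=y` defines only `CONFIG_FOO` while `=m` defines only `CONFIG_FOO_MODULE`, so board code that must also cover the modular build has to test both. A minimal sketch of the idiom, using a placeholder symbol `FOO` rather than one from this series:

```c
/*
 * Tristate symbol FOO:
 *   CONFIG_FOO=y  ->  only CONFIG_FOO is defined
 *   CONFIG_FOO=m  ->  only CONFIG_FOO_MODULE is defined
 *
 * Checking CONFIG_FOO alone silently drops the platform device when
 * the driver is built as a module, which is what the ecovec fixes
 * above correct for CONFIG_MMC_SDHI and CONFIG_MMC_SH_MMCIF.
 */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
/* register the platform device backed by the FOO driver */
#endif
```

The same reasoning applies to the inverted checks: `!defined(CONFIG_MMC_SH_MMCIF)` alone would wrongly register SDHI1 when MMCIF is built as a module.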
......@@ -115,7 +115,7 @@ CONFIG_USB_GADGET=y
CONFIG_USB_FILE_STORAGE=m
CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_MMC_TMIO=y
CONFIG_MMC_SDHI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
CONFIG_UIO=y
......
......@@ -70,7 +70,7 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_TMIO=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_SH_MMCIF=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
......
......@@ -6,7 +6,7 @@
#include <linux/io.h>
#include "pci-sh4.h"
int __init pcibios_map_platform_irq(u8 slot, u8 pin)
int __init pcibios_map_platform_irq(struct pci_dev *, u8 slot, u8 pin)
{
switch (slot) {
case 0: return 13;
......
......@@ -10,9 +10,6 @@
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
void (*warning)(void *data, char *msg);
/* msg must contain %s for the symbol */
void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
void (*address)(void *data, unsigned long address, int reliable);
/* On negative return stop dumping */
int (*stack)(void *data, char *name);
......
......@@ -373,8 +373,9 @@
#define __NR_open_by_handle_at 360
#define __NR_clock_adjtime 361
#define __NR_syncfs 362
#define __NR_sendmmsg 363
#define NR_syscalls 363
#define NR_syscalls 364
#ifdef __KERNEL__
......
......@@ -394,10 +394,11 @@
#define __NR_open_by_handle_at 371
#define __NR_clock_adjtime 372
#define __NR_syncfs 373
#define __NR_sendmmsg 374
#ifdef __KERNEL__
#define NR_syscalls 374
#define NR_syscalls 375
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
......
......@@ -17,7 +17,5 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_SH_FPU_EMU) += fpu.o
obj-y += irq/ init.o clock.o hwblk.o proc.o
obj-y += irq/ init.o clock.o fpu.o hwblk.o proc.o
......@@ -157,7 +157,7 @@ static int default_platform_runtime_suspend(struct device *dev)
might_sleep();
/* catch misconfigured drivers not starting with resume */
if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) {
if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &ad->flags)) {
ret = -EINVAL;
goto out;
}
......@@ -170,8 +170,8 @@ static int default_platform_runtime_suspend(struct device *dev)
/* put device on idle list */
spin_lock_irqsave(&hwblk_lock, flags);
list_add_tail(&pdev->archdata.entry, &hwblk_idle_list);
__set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
list_add_tail(&ad->entry, &hwblk_idle_list);
__set_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags);
spin_unlock_irqrestore(&hwblk_lock, flags);
/* increase idle count */
......
......@@ -69,19 +69,6 @@ stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
}
}
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
printk(data);
print_symbol(msg, symbol);
printk("\n");
}
static void print_trace_warning(void *data, char *msg)
{
printk("%s%s\n", (char *)data, msg);
}
static int print_trace_stack(void *data, char *name)
{
printk("%s <%s> ", (char *)data, name);
......@@ -98,8 +85,6 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops print_trace_ops = {
.warning = print_trace_warning,
.warning_symbol = print_trace_warning_symbol,
.stack = print_trace_stack,
.address = print_trace_address,
};
......
......@@ -93,6 +93,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;
case R_SH_DIR32:
value = get_unaligned(location);
value += relocation;
......
......@@ -14,16 +14,6 @@
#include <asm/unwinder.h>
#include <asm/ptrace.h>
static void callchain_warning(void *data, char *msg)
{
}
static void
callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}
static int callchain_stack(void *data, char *name)
{
return 0;
......@@ -38,8 +28,6 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops callchain_ops = {
.warning = callchain_warning,
.warning_symbol = callchain_warning_symbol,
.stack = callchain_stack,
.address = callchain_address,
};
......
......@@ -17,15 +17,6 @@
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
static void save_stack_warning(void *data, char *msg)
{
}
static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}
static int save_stack_stack(void *data, char *name)
{
return 0;
......@@ -51,8 +42,6 @@ static void save_stack_address(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops save_stack_ops = {
.warning = save_stack_warning,
.warning_symbol = save_stack_warning_symbol,
.stack = save_stack_stack,
.address = save_stack_address,
};
......@@ -88,8 +77,6 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops save_stack_ops_nosched = {
.warning = save_stack_warning,
.warning_symbol = save_stack_warning_symbol,
.stack = save_stack_stack,
.address = save_stack_address_nosched,
};
......
......@@ -380,3 +380,4 @@ ENTRY(sys_call_table)
.long sys_open_by_handle_at /* 360 */
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
......@@ -400,3 +400,4 @@ sys_call_table:
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
......@@ -23,17 +23,6 @@
#include <asm/sections.h>
#include <asm/stacktrace.h>
static void backtrace_warning_symbol(void *data, char *msg,
unsigned long symbol)
{
/* Ignore warnings */
}
static void backtrace_warning(void *data, char *msg)
{
/* Ignore warnings */
}
static int backtrace_stack(void *data, char *name)
{
/* Yes, we want all stacks */
......@@ -49,8 +38,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
}
static struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
};
......
......@@ -24,6 +24,7 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
......@@ -152,10 +153,12 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int ret;
/* enable clock */
/* wake up device and enable clock */
pm_runtime_get_sync(&p->pdev->dev);
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
......@@ -187,8 +190,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
/* stop clock */
/* stop clock and mark device as idle */
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
}
/* private flags */
......@@ -416,11 +420,15 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
int ret;
struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
p->total_cycles = 0;
return sh_cmt_start(p, FLAG_CLOCKSOURCE);
ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
if (!ret)
__clocksource_updatefreq_hz(cs, p->rate);
return ret;
}
static void sh_cmt_clocksource_disable(struct clocksource *cs)
......@@ -448,19 +456,10 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
/* clk_get_rate() needs an enabled clock */
clk_enable(p->clk);
p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
clk_disable(p->clk);
/* TODO: calculate good shift from rate and counter bit width */
cs->shift = 0;
cs->mult = clocksource_hz2mult(p->rate, cs->shift);
dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
......@@ -665,6 +664,7 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
......@@ -679,6 +679,9 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
if (!is_early_platform_device(pdev))
pm_runtime_enable(&pdev->dev);
return ret;
}
......
......@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
......@@ -109,10 +110,12 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
/* enable clock */
/* wake up device and enable clock */
pm_runtime_get_sync(&p->pdev->dev);
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
......@@ -141,8 +144,9 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
/* stop clock */
/* stop clock and mark device as idle */
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
......@@ -199,8 +203,12 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
int ret;
return sh_tmu_enable(p);
ret = sh_tmu_enable(p);
if (!ret)
__clocksource_updatefreq_hz(cs, p->rate);
return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
......@@ -221,17 +229,10 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
/* clk_get_rate() needs an enabled clock */
clk_enable(p->clk);
/* channel will be configured at parent clock / 4 */
p->rate = clk_get_rate(p->clk) / 4;
clk_disable(p->clk);
/* TODO: calculate good shift from rate and counter bit width */
cs->shift = 10;
cs->mult = clocksource_hz2mult(p->rate, cs->shift);
dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
......@@ -414,6 +415,7 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
......@@ -428,6 +430,9 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
if (!is_early_platform_device(pdev))
pm_runtime_enable(&pdev->dev);
return ret;
}
......
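The sh_cmt and sh_tmu changes above share two patterns: the clock enable/disable paths are bracketed with pm_runtime_get_sync()/pm_runtime_put_sync(), and the clocksource is registered with a dummy 1 Hz rate that ->enable() corrects via __clocksource_updatefreq_hz() once the clock is actually running and its rate is known. A condensed sketch of that shape, assuming a hypothetical `struct my_timer_priv` with the same pdev/clk/rate fields as the drivers above (not the real driver code):

```c
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct my_timer_priv {
	struct platform_device *pdev;
	struct clk *clk;
	unsigned long rate;
	struct clocksource cs;
};

static int my_timer_enable(struct my_timer_priv *p)
{
	int ret;

	/* wake up the device before touching its clock */
	pm_runtime_get_sync(&p->pdev->dev);

	ret = clk_enable(p->clk);
	if (ret) {
		/* undo the runtime PM reference on failure */
		pm_runtime_put_sync(&p->pdev->dev);
		return ret;
	}

	p->rate = clk_get_rate(p->clk) / 4;	/* divider as in sh_tmu */
	return 0;
}

static int my_clocksource_enable(struct clocksource *cs)
{
	struct my_timer_priv *p = container_of(cs, struct my_timer_priv, cs);
	int ret = my_timer_enable(p);

	/* the real frequency is only known once the clock is running */
	if (!ret)
		__clocksource_updatefreq_hz(cs, p->rate);
	return ret;
}

static int my_register_clocksource(struct my_timer_priv *p)
{
	struct clocksource *cs = &p->cs;

	cs->enable = my_clocksource_enable;
	/* name, rating, mask, flags, disable etc. omitted in this sketch */

	/* register with a dummy 1 Hz value; ->enable() fixes it up */
	clocksource_register_hz(cs, 1);
	return 0;
}
```

The dummy-rate registration avoids having to enable the clock just to call clk_get_rate() at probe time, which is what the removed clk_enable()/clk_get_rate()/clk_disable() sequence used to do.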
......@@ -48,7 +48,7 @@ enum sh_dmae_desc_status {
/*
* Used for write-side mutual exclusion for the global device list,
* read-side synchronization by way of RCU.
* read-side synchronization by way of RCU, and per-controller data.
*/
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
......@@ -85,22 +85,35 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
*/
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
unsigned short dmaor = dmaor_read(shdev);
unsigned short dmaor;
unsigned long flags;
spin_lock_irqsave(&sh_dmae_lock, flags);
dmaor = dmaor_read(shdev);
dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
unsigned short dmaor;
unsigned long flags;
sh_dmae_ctl_stop(shdev);
dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
spin_lock_irqsave(&sh_dmae_lock, flags);
dmaor_write(shdev, dmaor);
if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
pr_warning("dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
spin_unlock_irqrestore(&sh_dmae_lock, flags);
if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
return 0;
}
......@@ -184,7 +197,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
/* When DMA was working, can not set data to CHCR */
/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
if (dmae_is_busy(sh_chan))
return -EBUSY;
......@@ -374,7 +387,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
LIST_HEAD(list);
int descs = sh_chan->descs_allocated;
/* Protect against ISR */
spin_lock_irq(&sh_chan->desc_lock);
dmae_halt(sh_chan);
spin_unlock_irq(&sh_chan->desc_lock);
/* Now no new interrupts will occur */
/* Prepared and not submitted descriptors can still be on the queue */
if (!list_empty(&sh_chan->ld_queue))
......@@ -384,6 +402,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
/* The caller is holding dma_list_mutex */
struct sh_dmae_slave *param = chan->private;
clear_bit(param->slave_id, sh_dmae_slave_used);
chan->private = NULL;
}
spin_lock_bh(&sh_chan->desc_lock);
......@@ -563,8 +582,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
if (!chan || !len)
return NULL;
chan->private = NULL;
sh_chan = to_sh_chan(chan);
sg_init_table(&sg, 1);
......@@ -620,9 +637,9 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (!chan)
return -EINVAL;
spin_lock_bh(&sh_chan->desc_lock);
dmae_halt(sh_chan);
spin_lock_bh(&sh_chan->desc_lock);
if (!list_empty(&sh_chan->ld_queue)) {
/* Record partial transfer */
struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
......@@ -716,6 +733,14 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
list_move(&desc->node, &sh_chan->ld_free);
}
}
if (all && !callback)
/*
* Terminating and the loop completed normally: forgive
* uncompleted cookies
*/
sh_chan->completed_cookie = sh_chan->common.cookie;
spin_unlock_bh(&sh_chan->desc_lock);
if (callback)
......@@ -733,10 +758,6 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
while (__ld_cleanup(sh_chan, all))
;
if (all)
/* Terminating - forgive uncompleted cookies */
sh_chan->completed_cookie = sh_chan->common.cookie;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
......@@ -782,8 +803,10 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
sh_dmae_chan_ld_cleanup(sh_chan, false);
last_used = chan->cookie;
/* First read completed cookie to avoid a skew */
last_complete = sh_chan->completed_cookie;
rmb();
last_used = chan->cookie;
BUG_ON(last_complete < 0);
dma_set_tx_state(txstate, last_complete, last_used, 0);
......@@ -813,8 +836,12 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
struct sh_dmae_chan *sh_chan = data;
u32 chcr;
spin_lock(&sh_chan->desc_lock);
chcr = sh_dmae_readl(sh_chan, CHCR);
if (chcr & CHCR_TE) {
/* DMA stop */
......@@ -824,10 +851,13 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
tasklet_schedule(&sh_chan->tasklet);
}
spin_unlock(&sh_chan->desc_lock);
return ret;
}
static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
unsigned int handled = 0;
int i;
......@@ -839,22 +869,32 @@ static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
struct sh_desc *desc;
LIST_HEAD(dl);
if (!sh_chan)
continue;
spin_lock(&sh_chan->desc_lock);
/* Stop the channel */
dmae_halt(sh_chan);
list_splice_init(&sh_chan->ld_queue, &dl);
spin_unlock(&sh_chan->desc_lock);
/* Complete all */
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
list_for_each_entry(desc, &dl, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
desc->mark = DESC_IDLE;
if (tx->callback)
tx->callback(tx->callback_param);
}
list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
spin_lock(&sh_chan->desc_lock);
list_splice(&dl, &sh_chan->ld_free);
spin_unlock(&sh_chan->desc_lock);
handled++;
}
......@@ -867,10 +907,11 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
if (dmaor_read(shdev) & DMAOR_AE)
return IRQ_RETVAL(sh_dmae_reset(data));
else
if (!(dmaor_read(shdev) & DMAOR_AE))
return IRQ_NONE;
sh_dmae_reset(data);
return IRQ_HANDLED;
}
static void dmae_do_tasklet(unsigned long data)
......@@ -902,17 +943,11 @@ static void dmae_do_tasklet(unsigned long data)
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
unsigned int handled;
/* Fast path out if NMIF is not asserted for this controller */
if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
return false;
handled = sh_dmae_reset(shdev);
if (handled)
return true;
return false;
return sh_dmae_reset(shdev);
}
static int sh_dmae_nmi_handler(struct notifier_block *self,
......@@ -982,9 +1017,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
(unsigned long)new_sh_chan);
/* Init the channel */
dmae_init(new_sh_chan);
spin_lock_init(&new_sh_chan->desc_lock);
/* Init descripter manage list */
......@@ -1045,7 +1077,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
unsigned long irqflags = IRQF_DISABLED,
chan_flag[SH_DMAC_MAX_CHANNELS] = {};
unsigned long flags;
int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
int err, i, irq_cnt = 0, irqres = 0;
struct sh_dmae_device *shdev;
......@@ -1111,11 +1142,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&sh_dmae_lock, flags);
spin_lock_irq(&sh_dmae_lock);
list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
spin_unlock_irqrestore(&sh_dmae_lock, flags);
spin_unlock_irq(&sh_dmae_lock);
/* reset dma controller */
/* reset dma controller - only needed as a test */
err = sh_dmae_rst(shdev);
if (err)
goto rst_err;
......@@ -1218,15 +1249,18 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
eirq_err:
#endif
rst_err:
spin_lock_irqsave(&sh_dmae_lock, flags);
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irqrestore(&sh_dmae_lock, flags);
spin_unlock_irq(&sh_dmae_lock);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (dmars)
iounmap(shdev->dmars);
emapdmars:
iounmap(shdev->chan_reg);
synchronize_rcu();
emapchan:
kfree(shdev);
ealloc:
......@@ -1242,7 +1276,6 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct resource *res;
unsigned long flags;
int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(&shdev->common);
......@@ -1250,9 +1283,9 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
if (errirq > 0)
free_irq(errirq, shdev);
spin_lock_irqsave(&sh_dmae_lock, flags);
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irqrestore(&sh_dmae_lock, flags);
spin_unlock_irq(&sh_dmae_lock);
/* channel data remove */
sh_dmae_chan_remove(shdev);
......@@ -1263,6 +1296,7 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
iounmap(shdev->dmars);
iounmap(shdev->chan_reg);
synchronize_rcu();
kfree(shdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......@@ -1281,12 +1315,78 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
sh_dmae_ctl_stop(shdev);
}
static int sh_dmae_runtime_suspend(struct device *dev)
{
return 0;
}
static int sh_dmae_runtime_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
return sh_dmae_rst(shdev);
}
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
int i;
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
if (sh_chan->descs_allocated)
sh_chan->pm_error = pm_runtime_put_sync(dev);
}
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
int i;
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
struct sh_dmae_slave *param = sh_chan->common.private;
if (!sh_chan->descs_allocated)
continue;
if (!sh_chan->pm_error)
pm_runtime_get_sync(dev);
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
} else {
dmae_init(sh_chan);
}
}
return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
const struct dev_pm_ops sh_dmae_pm = {
.suspend = sh_dmae_suspend,
.resume = sh_dmae_resume,
.runtime_suspend = sh_dmae_runtime_suspend,
.runtime_resume = sh_dmae_runtime_resume,
};
static struct platform_driver sh_dmae_driver = {
.remove = __exit_p(sh_dmae_remove),
.shutdown = sh_dmae_shutdown,
.driver = {
.owner = THIS_MODULE,
.name = "sh-dma-engine",
.pm = &sh_dmae_pm,
},
};
......
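The shdma changes above also tighten the global device-list handling: writers take sh_dmae_lock with plain spin_lock_irq() (the probe/remove paths never run in hard-IRQ context), readers walk the list under RCU, and synchronize_rcu() runs before the controller structure is freed so no reader can still hold a reference. A generic sketch of that pattern with hypothetical names (my_dev, my_devices, my_lock), not the driver's actual code:

```c
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_dev {
	struct list_head node;
	/* ... controller state ... */
};

static DEFINE_SPINLOCK(my_lock);	/* write-side exclusion only */
static LIST_HEAD(my_devices);		/* read side protected by RCU */

static void my_dev_add(struct my_dev *d)
{
	spin_lock_irq(&my_lock);
	list_add_tail_rcu(&d->node, &my_devices);
	spin_unlock_irq(&my_lock);
}

static void my_dev_remove(struct my_dev *d)
{
	spin_lock_irq(&my_lock);
	list_del_rcu(&d->node);
	spin_unlock_irq(&my_lock);

	/* wait for all current RCU readers before freeing */
	synchronize_rcu();
	kfree(d);
}

/* read side, e.g. an error path that must visit every controller */
static void my_dev_for_each(void (*fn)(struct my_dev *d))
{
	struct my_dev *d;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &my_devices, node)
		fn(d);	/* fn must not sleep under rcu_read_lock() */
	rcu_read_unlock();
}
```

Keeping the spinlock out of the read path is what lets the NMI and error handlers in the driver walk the controller list without risking a deadlock against a writer.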
......@@ -37,6 +37,7 @@ struct sh_dmae_chan {
int id; /* Raw id of this channel */
u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
};
struct sh_dmae_device {
......
......@@ -729,3 +729,4 @@ module_exit(sh_mobile_i2c_adap_exit);
MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-sh_mobile");
......@@ -105,7 +105,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, &clk->arch_flags);
table, NULL);
return 0;
}
......
......@@ -235,6 +235,11 @@ static void __init intc_subgroup_map(struct intc_desc_int *d)
irq_set_handler_data(irq, (void *)entry->handle);
/*
* Set the virtual IRQ as non-threadable.
*/
irq_set_nothread(irq);
irq_set_chained_handler(entry->pirq, intc_virq_handler);
add_virq_to_pirq(entry->pirq, irq);
......
......@@ -41,6 +41,7 @@
#include <linux/platform_device.h>
#include <linux/serial_sci.h>
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/ctype.h>
......@@ -560,6 +561,9 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
if (port->enable)
port->enable(&port->port);
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
sci_schedule_break_timer(port);
......@@ -569,6 +573,9 @@ static void sci_break_timer(unsigned long data)
sci_schedule_break_timer(port);
} else
port->break_flag = 0;
if (port->disable)
port->disable(&port->port);
}
static int sci_handle_errors(struct uart_port *port)
......@@ -837,6 +844,8 @@ static void sci_clk_enable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
pm_runtime_get_sync(port->dev);
clk_enable(sci_port->iclk);
sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
clk_enable(sci_port->fclk);
......@@ -848,6 +857,8 @@ static void sci_clk_disable(struct uart_port *port)
clk_disable(sci_port->fclk);
clk_disable(sci_port->iclk);
pm_runtime_put_sync(port->dev);
}
static int sci_request_irq(struct sci_port *port)
......@@ -1756,6 +1767,8 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
pm_runtime_enable(&dev->dev);
}
sci_port->break_timer.data = (unsigned long)sci_port;
......@@ -1775,7 +1788,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
*
* For the muxed case there's nothing more to do.
*/
port->irq = p->irqs[SCIx_TXI_IRQ];
port->irq = p->irqs[SCIx_RXI_IRQ];
if (p->dma_dev)
dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
......@@ -1936,6 +1949,7 @@ static int sci_remove(struct platform_device *dev)
clk_put(port->iclk);
clk_put(port->fclk);
pm_runtime_disable(&dev->dev);
return 0;
}
......
......@@ -270,12 +270,12 @@
#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7367) || \
defined(CONFIG_ARCH_SH7377)
defined(CONFIG_ARCH_SH7367)
#define SCIF_FNS(name, scif_offset, scif_size) \
CPU_SCIF_FNS(name, scif_offset, scif_size)
#elif defined(CONFIG_ARCH_SH7372)
#elif defined(CONFIG_ARCH_SH7377) || \
defined(CONFIG_ARCH_SH7372) || \
defined(CONFIG_ARCH_SH73A0)
#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
#define SCIF_FNS(name, scif_offset, scif_size) \
......@@ -313,9 +313,7 @@
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7367) || \
defined(CONFIG_ARCH_SH7377)
defined(CONFIG_ARCH_SH7367)
SCIF_FNS(SCSMR, 0x00, 16)
SCIF_FNS(SCBRR, 0x04, 8)
......@@ -326,7 +324,9 @@ SCIF_FNS(SCFDR, 0x1c, 16)
SCIF_FNS(SCxTDR, 0x20, 8)
SCIF_FNS(SCxRDR, 0x24, 8)
SCIF_FNS(SCLSR, 0x00, 0)
#elif defined(CONFIG_ARCH_SH7372)
#elif defined(CONFIG_ARCH_SH7377) || \
defined(CONFIG_ARCH_SH7372) || \
defined(CONFIG_ARCH_SH73A0)
SCIF_FNS(SCSMR, 0x00, 16)
SCIF_FNS(SCBRR, 0x04, 8)
SCIF_FNS(SCSCR, 0x08, 16)
......