Commit fb591fbd authored by Linus Torvalds

Merge tag 'mmc-v4.5' of git://git.linaro.org/people/ulf.hansson/mmc

Pull MMC updates from Ulf Hansson:
 "MMC core:
   - Optimize boot time by detecting cards simultaneously
   - Make runtime resume default behavior for MMC/SD
   - Enable MMC/SD/SDIO devices to suspend/resume asynchronously
   - Allow more than 8 partitions per card
   - Introduce MMC_CAP2_NO_SDIO to prevent unsupported SDIO commands
   - Support the standard DT wakeup-source property
   - Fix driver strength switching for HS200 and HS400
   - Fix switch command timeout
   - Fix invalid vdd in voltage switch power cycle for SDIO

  MMC host:
   - sdhci: Restore behavior when setting VDD via external regulator
   - sdhci: A couple of changes/fixes related to the dma support
   - sdhci-tegra: Add Tegra210 support
   - sdhci-tegra: Support for UHS-I cards including tuning support
   - sdhci-of-at91: Add PM support
   - sh_mmcif: Rework dma channel handling
   - mvsdio: Delete platform data code path"

* tag 'mmc-v4.5' of git://git.linaro.org/people/ulf.hansson/mmc: (52 commits)
  mmc: dw_mmc: remove the unused quirks
  mmc: sdhci-pci: use to_pci_dev()
  mmc: cb710: use to_platform_device()
  mmc: tegra: use correct accessor for misc ctrl register
  mmc: tegra: enable UHS-I modes
  mmc: tegra: implement UHS tuning
  mmc: tegra: disable SPI_MODE_CLKEN
  mmc: tegra: implement module external clock change
  mmc: sdhci: restore behavior when setting VDD via external regulator
  mmc: It is not an error for the card to be removed while suspended
  mmc: block: Allow more than 8 partitions per card
  mmc: core: Optimize boot time by detecting cards simultaneously
  mmc: dw_mmc: use resource_size_t to store physical address
  mmc: core: fix __mmc_switch timeout caused by preempt
  mmc: usdhi6rol0: handle NULL data in timeout
  mmc: of_mmc_spi: Add IRQF_ONESHOT to interrupt flags
  mmc: mediatek: change some dev_err to dev_dbg
  mmc: enable MMC/SD/SDIO device to suspend/resume asynchronously
  mmc: sdhci: Fix sdhci_runtime_pm_bus_on/off()
  mmc: sdhci: 64-bit DMA actually has 4-byte alignment
  ...
......@@ -11,6 +11,7 @@ Required properties:
- "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
- "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
- "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
- "renesas,mmcif-r8a7793" for the MMCIF found in r8a7793 SoCs
- "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs
- clocks: reference to the functional clock
......
......@@ -1896,7 +1896,6 @@ ATMEL AT91 / AT32 MCI DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c
F: drivers/mmc/host/atmel-mci-regs.h
ATMEL AT91 / AT32 SERIAL DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
......
......@@ -171,11 +171,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
static inline int mmc_get_devidx(struct gendisk *disk)
{
int devmaj = MAJOR(disk_devt(disk));
int devidx = MINOR(disk_devt(disk)) / perdev_minors;
if (!devmaj)
devidx = disk->first_minor / perdev_minors;
int devidx = disk->first_minor / perdev_minors;
return devidx;
}
......@@ -344,7 +340,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
idata = kzalloc(sizeof(*idata), GFP_KERNEL);
idata = kmalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
......@@ -364,7 +360,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
if (!idata->buf_bytes)
return idata;
idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
err = -ENOMEM;
goto idata_err;
......@@ -2244,6 +2240,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
md->disk->flags |= GENHD_FL_NO_PART_SCAN;
......
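The block.c hunks above implement the "allow more than 8 partitions per card" item from the summary: the device index now always comes from the disk's first minor, and GENHD_FL_EXT_DEVT lets the block layer hand out extended dev_t numbers once the per-device minor budget is exhausted. A rough worked example of the index arithmetic, assuming perdev_minors keeps its usual default of 8; the concrete minor numbers below are illustrative only:

#include <stdio.h>

/* Illustrative stand-in for the perdev_minors module parameter (usually 8). */
static const int perdev_minors = 8;

/* Same arithmetic as mmc_get_devidx() after the change above. */
static int example_get_devidx(int first_minor)
{
        return first_minor / perdev_minors;
}

int main(void)
{
        /* e.g. mmcblk0 starts at minor 0, mmcblk3 at minor 24 */
        printf("first_minor  0 -> devidx %d\n", example_get_devidx(0));
        printf("first_minor 24 -> devidx %d\n", example_get_devidx(24));
        return 0;
}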
......@@ -349,6 +349,8 @@ int mmc_add_card(struct mmc_card *card)
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
ret = device_add(&card->dev);
if (ret)
return ret;
......
......@@ -55,7 +55,6 @@
*/
#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
......@@ -66,21 +65,16 @@ static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(workqueue, work, delay);
}
/*
* Internal function. Flush all scheduled work from the MMC work queue.
*/
static void mmc_flush_scheduled_work(void)
{
flush_workqueue(workqueue);
/*
* We use the system_freezable_wq for two reasons.
* First, it allows several work items (not the same work item) to be
* executed simultaneously. Second, the queue becomes frozen when
* userspace becomes frozen during system PM.
*/
return queue_delayed_work(system_freezable_wq, work, delay);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
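The hunk above replaces the private ordered kmmcd workqueue with system_freezable_wq; the comment spells out why: work items for different hosts can now run in parallel (the "detect cards simultaneously" boot-time item in the summary), and the queue is frozen while userspace is frozen during system PM. A minimal sketch of the same queuing pattern using only the standard workqueue API; the work handler and its name are hypothetical:

#include <linux/workqueue.h>

/* Hypothetical detect work, declared the way host->detect effectively is. */
static void example_detect_work(struct work_struct *work)
{
        /* rescan the bus, probe newly inserted cards, ... */
}
static DECLARE_DELAYED_WORK(example_detect, example_detect_work);

static void example_detect_change(unsigned long delay)
{
        /*
         * system_freezable_wq lets several distinct work items run
         * concurrently and is frozen across system suspend, which is
         * exactly what card detection wants.
         */
        queue_delayed_work(system_freezable_wq, &example_detect, delay);
}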
......@@ -1485,7 +1479,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
if (IS_ERR(mmc->supply.vmmc)) {
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "No vmmc regulator found\n");
dev_dbg(dev, "No vmmc regulator found\n");
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
if (ret > 0)
......@@ -1497,7 +1491,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
if (IS_ERR(mmc->supply.vqmmc)) {
if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(dev, "No vqmmc regulator found\n");
dev_dbg(dev, "No vqmmc regulator found\n");
}
return 0;
......@@ -2476,15 +2470,20 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
* should be ignored by SD/eMMC cards.
* Skip it if we already know that we do not support SDIO commands
*/
sdio_reset(host);
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
/* Order's important: probe SDIO, then SD, then MMC */
if (!mmc_attach_sdio(host))
return 0;
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
if (!mmc_attach_sdio(host))
return 0;
if (!mmc_attach_sd(host))
return 0;
if (!mmc_attach_mmc(host))
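The rescan hunk above is where the new MMC_CAP2_NO_SDIO capability from the summary takes effect: hosts that set it skip both the CMD52 reset and the SDIO attach attempt. A hedged sketch of how a host driver without SDIO support might advertise the flag; the function name is hypothetical:

#include <linux/mmc/host.h>

/* Called from a hypothetical host driver's probe path. */
static void example_setup_caps(struct mmc_host *mmc)
{
        /* This controller handles SD/eMMC only, never SDIO. */
        mmc->caps2 |= MMC_CAP2_NO_SDIO;
}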
......@@ -2498,9 +2497,6 @@ int _mmc_detect_card_removed(struct mmc_host *host)
{
int ret;
if (host->caps & MMC_CAP_NONREMOVABLE)
return 0;
if (!host->card || mmc_card_removed(host->card))
return 1;
......@@ -2536,6 +2532,9 @@ int mmc_detect_card_removed(struct mmc_host *host)
if (!card)
return 1;
if (host->caps & MMC_CAP_NONREMOVABLE)
return 0;
ret = mmc_card_removed(card);
/*
* The card will be considered unchanged unless we have been asked to
......@@ -2567,11 +2566,6 @@ void mmc_rescan(struct work_struct *work)
container_of(work, struct mmc_host, detect.work);
int i;
if (host->trigger_card_event && host->ops->card_event) {
host->ops->card_event(host);
host->trigger_card_event = false;
}
if (host->rescan_disable)
return;
......@@ -2580,6 +2574,13 @@ void mmc_rescan(struct work_struct *work)
return;
host->rescan_entered = 1;
if (host->trigger_card_event && host->ops->card_event) {
mmc_claim_host(host);
host->ops->card_event(host);
mmc_release_host(host);
host->trigger_card_event = false;
}
mmc_bus_get(host);
/*
......@@ -2611,15 +2612,14 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
mmc_claim_host(host);
if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
host->ops->get_cd(host) == 0) {
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
goto out;
}
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
break;
......@@ -2663,7 +2663,6 @@ void mmc_stop_host(struct mmc_host *host)
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
......@@ -2759,14 +2758,13 @@ int mmc_flush_cache(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_flush_cache);
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
/* Do the card removal on suspend if card is assumed removable.
 * Do that in a PM notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
static int mmc_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
struct mmc_host *host = container_of(
notify_block, struct mmc_host, pm_notify);
......@@ -2813,6 +2811,17 @@ int mmc_pm_notify(struct notifier_block *notify_block,
return 0;
}
void mmc_register_pm_notifier(struct mmc_host *host)
{
host->pm_notify.notifier_call = mmc_pm_notify;
register_pm_notifier(&host->pm_notify);
}
void mmc_unregister_pm_notifier(struct mmc_host *host)
{
unregister_pm_notifier(&host->pm_notify);
}
#endif
/**
......@@ -2836,13 +2845,9 @@ static int __init mmc_init(void)
{
int ret;
workqueue = alloc_ordered_workqueue("kmmcd", 0);
if (!workqueue)
return -ENOMEM;
ret = mmc_register_bus();
if (ret)
goto destroy_workqueue;
return ret;
ret = mmc_register_host_class();
if (ret)
......@@ -2858,9 +2863,6 @@ static int __init mmc_init(void)
mmc_unregister_host_class();
unregister_bus:
mmc_unregister_bus();
destroy_workqueue:
destroy_workqueue(workqueue);
return ret;
}
......@@ -2869,7 +2871,6 @@ static void __exit mmc_exit(void)
sdio_unregister_bus();
mmc_unregister_host_class();
mmc_unregister_bus();
destroy_workqueue(workqueue);
}
subsys_initcall(mmc_init);
......
......@@ -90,5 +90,13 @@ int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
#ifdef CONFIG_PM_SLEEP
void mmc_register_pm_notifier(struct mmc_host *host);
void mmc_unregister_pm_notifier(struct mmc_host *host);
#else
static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
#endif
#endif
......@@ -21,7 +21,6 @@
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
......@@ -275,7 +274,8 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
if (of_property_read_bool(np, "keep-power-in-suspend"))
host->pm_caps |= MMC_PM_KEEP_POWER;
if (of_property_read_bool(np, "enable-sdio-wakeup"))
if (of_property_read_bool(np, "wakeup-source") ||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
if (of_property_read_bool(np, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
......@@ -348,9 +348,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
/*
......@@ -395,7 +392,7 @@ int mmc_add_host(struct mmc_host *host)
#endif
mmc_start_host(host);
register_pm_notifier(&host->pm_notify);
mmc_register_pm_notifier(host);
return 0;
}
......@@ -412,7 +409,7 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
unregister_pm_notifier(&host->pm_notify);
mmc_unregister_pm_notifier(host);
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
......
......@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_clock(host, max_dtr);
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
......@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_clock(host, max_dtr);
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time,
true, send_status, true);
......@@ -1907,16 +1905,8 @@ static int mmc_shutdown(struct mmc_host *host)
*/
static int mmc_resume(struct mmc_host *host)
{
int err = 0;
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_resume(host);
pm_runtime_set_active(&host->card->dev);
pm_runtime_mark_last_busy(&host->card->dev);
}
pm_runtime_enable(&host->card->dev);
return err;
return 0;
}
/*
......@@ -1944,12 +1934,9 @@ static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
err = _mmc_resume(host);
if (err)
pr_err("%s: error %d doing aggressive resume\n",
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
return 0;
......
......@@ -489,6 +489,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned long timeout;
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
bool expired = false;
mmc_retune_hold(host);
......@@ -545,6 +546,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (send_status) {
/*
* Due to the possibility of being preempted after
* sending the status command, check the expiration
* time first.
*/
expired = time_after(jiffies, timeout);
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
goto out;
......@@ -565,7 +572,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
}
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(host), __func__);
err = -ETIMEDOUT;
......
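The __mmc_switch() hunks above fix a false timeout: if the task is preempted right after the status command is sent, jiffies can already be past the deadline even though the card left the programming state long ago, so the expiry is now sampled before CMD13 and only honoured when the card is still busy. A small userspace sketch of the same pattern; device_still_busy() and the timing values are invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for a slow, preemptible status query (think CMD13). */
static bool device_still_busy(void)
{
        return false;   /* pretend the device finished right away */
}

static int wait_not_busy(int timeout_secs)
{
        time_t deadline = time(NULL) + timeout_secs;

        for (;;) {
                /* Sample expiry BEFORE the query, as __mmc_switch() now does. */
                bool expired = time(NULL) > deadline;

                if (!device_still_busy())
                        return 0;       /* done, even if we noticed late */
                if (expired) {
                        fprintf(stderr, "still busy after timeout\n");
                        return -1;
                }
        }
}

int main(void)
{
        return wait_not_busy(1) ? 1 : 0;
}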
......@@ -16,7 +16,7 @@ struct mmc_pwrseq_ops {
};
struct mmc_pwrseq {
struct mmc_pwrseq_ops *ops;
const struct mmc_pwrseq_ops *ops;
};
#ifdef CONFIG_OF
......
......@@ -51,7 +51,7 @@ static void mmc_pwrseq_emmc_free(struct mmc_host *host)
kfree(pwrseq);
}
static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
.post_power_on = mmc_pwrseq_emmc_reset,
.free = mmc_pwrseq_emmc_free,
};
......
......@@ -87,7 +87,7 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
kfree(pwrseq);
}
static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
.pre_power_on = mmc_pwrseq_simple_pre_power_on,
.post_power_on = mmc_pwrseq_simple_post_power_on,
.power_off = mmc_pwrseq_simple_power_off,
......
......@@ -1128,16 +1128,8 @@ static int _mmc_sd_resume(struct mmc_host *host)
*/
static int mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_sd_resume(host);
pm_runtime_set_active(&host->card->dev);
pm_runtime_mark_last_busy(&host->card->dev);
}
pm_runtime_enable(&host->card->dev);
return err;
return 0;
}
/*
......@@ -1165,12 +1157,9 @@ static int mmc_sd_runtime_resume(struct mmc_host *host)
{
int err;
if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
err = _mmc_sd_resume(host);
if (err)
pr_err("%s: error %d doing aggressive resume\n",
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
return 0;
......
......@@ -630,7 +630,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
*/
if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
ocr);
ocr_card);
if (err == -EAGAIN) {
sdio_reset(host);
mmc_go_idle(host);
......
......@@ -322,6 +322,7 @@ int sdio_add_func(struct sdio_func *func)
sdio_set_of_node(func);
sdio_acpi_set_handle(func);
device_enable_async_suspend(&func->dev);
ret = device_add(&func->dev);
if (ret == 0)
sdio_func_set_present(func);
......
......@@ -455,6 +455,7 @@ config MMC_TIFM_SD
config MMC_MVSDIO
tristate "Marvell MMC/SD/SDIO host driver"
depends on PLAT_ORION
depends on OF
---help---
This selects the Marvell SDIO host driver.
SDIO may currently be found on the Kirkwood 88F6281 and 88F6192
......
/*
* Atmel MultiMedia Card Interface driver
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
* Registers and bitfields marked with [2] are only available in MCI2
*/
#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
#define __DRIVERS_MMC_ATMEL_MCI_H__
/* MCI Register Definitions */
#define ATMCI_CR 0x0000 /* Control */
# define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
# define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
# define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
# define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
# define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */
#define ATMCI_MR 0x0004 /* Mode */
# define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
# define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
# define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
# define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
# define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
# define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
# define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
# define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */
#define ATMCI_DTOR 0x0008 /* Data Timeout */
# define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
# define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
# define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
# define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */
# define ATMCI_SDCSEL_MASK ( 3 << 0)
# define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
# define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
# define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
# define ATMCI_SDCBUS_MASK ( 3 << 6)
#define ATMCI_ARGR 0x0010 /* Command Argument */
#define ATMCI_CMDR 0x0014 /* Command */
# define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
# define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */
# define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */
# define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */
# define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */
# define ATMCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */
# define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */
# define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */
# define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */
# define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */
# define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */
# define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */
# define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */
# define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */
# define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */
# define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */
# define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */
# define ATMCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */
# define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */
# define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */
# define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */
# define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */
#define ATMCI_BLKR 0x0018 /* Block */
# define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
# define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
# define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
# define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
#define ATMCI_RSPR 0x0020 /* Response 0 */
#define ATMCI_RSPR1 0x0024 /* Response 1 */
#define ATMCI_RSPR2 0x0028 /* Response 2 */
#define ATMCI_RSPR3 0x002c /* Response 3 */
#define ATMCI_RDR 0x0030 /* Receive Data */
#define ATMCI_TDR 0x0034 /* Transmit Data */
#define ATMCI_SR 0x0040 /* Status */
#define ATMCI_IER 0x0044 /* Interrupt Enable */
#define ATMCI_IDR 0x0048 /* Interrupt Disable */
#define ATMCI_IMR 0x004c /* Interrupt Mask */
# define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */
# define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */
# define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */
# define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */
# define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */
# define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */
# define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */
# define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */
# define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */
# define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */
# define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */
# define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */
# define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */
# define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */
# define ATMCI_RINDE ( 1 << 16) /* Response Index Error */
# define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */
# define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */
# define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */
# define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */
# define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */
# define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */
# define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */
# define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */
# define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */
# define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */
# define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */
# define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */
# define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */
# define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */
# define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */
#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
# define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
# define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
# define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
#define ATMCI_CFG 0x0054 /* Configuration[2] */
# define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
# define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
# define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
# define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
# define ATMCI_WP_EN ( 1 << 0) /* WP Enable */
# define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
# define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
# define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
#define ATMCI_VERSION 0x00FC /* Version */
#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
/* This is not including the FIFO Aperture on MCI2 */
#define ATMCI_REGS_SIZE 0x100
/* Register access macros */
#ifdef CONFIG_AVR32
#define atmci_readl(port, reg) \
__raw_readl((port)->regs + reg)
#define atmci_writel(port, reg, value) \
__raw_writel((value), (port)->regs + reg)
#else
#define atmci_readl(port, reg) \
readl_relaxed((port)->regs + reg)
#define atmci_writel(port, reg, value) \
writel_relaxed((value), (port)->regs + reg)
#endif
/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
#ifdef CONFIG_AVR32
# define ATMCI_PDC_CONNECTED 0
#else
# define ATMCI_PDC_CONNECTED 1
#endif
/*
* Fix sconfig's burst size according to atmel MCI. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
*
* This can be done by finding most significant bit set.
*/
static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
{
if (maxburst > 1)
return fls(maxburst) - 2;
else
return 0;
}
#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
......@@ -44,7 +44,141 @@
#include <asm/io.h>
#include <asm/unaligned.h>
#include "atmel-mci-regs.h"
/*
* Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
* Registers and bitfields marked with [2] are only available in MCI2
*/
/* MCI Register Definitions */
#define ATMCI_CR 0x0000 /* Control */
#define ATMCI_CR_MCIEN BIT(0) /* MCI Enable */
#define ATMCI_CR_MCIDIS BIT(1) /* MCI Disable */
#define ATMCI_CR_PWSEN BIT(2) /* Power Save Enable */
#define ATMCI_CR_PWSDIS BIT(3) /* Power Save Disable */
#define ATMCI_CR_SWRST BIT(7) /* Software Reset */
#define ATMCI_MR 0x0004 /* Mode */
#define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
#define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
#define ATMCI_MR_RDPROOF BIT(11) /* Read Proof */
#define ATMCI_MR_WRPROOF BIT(12) /* Write Proof */
#define ATMCI_MR_PDCFBYTE BIT(13) /* Force Byte Transfer */
#define ATMCI_MR_PDCPADV BIT(14) /* Padding Value */
#define ATMCI_MR_PDCMODE BIT(15) /* PDC-oriented Mode */
#define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */
#define ATMCI_DTOR 0x0008 /* Data Timeout */
#define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
#define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
#define ATMCI_SDCSEL_SLOT_A (0 << 0) /* Select SD slot A */
#define ATMCI_SDCSEL_SLOT_B (1 << 0) /* Select SD slot B */
#define ATMCI_SDCSEL_MASK (3 << 0)
#define ATMCI_SDCBUS_1BIT (0 << 6) /* 1-bit data bus */
#define ATMCI_SDCBUS_4BIT (2 << 6) /* 4-bit data bus */
#define ATMCI_SDCBUS_8BIT (3 << 6) /* 8-bit data bus[2] */
#define ATMCI_SDCBUS_MASK (3 << 6)
#define ATMCI_ARGR 0x0010 /* Command Argument */
#define ATMCI_CMDR 0x0014 /* Command */
#define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
#define ATMCI_CMDR_RSPTYP_NONE (0 << 6) /* No response */
#define ATMCI_CMDR_RSPTYP_48BIT (1 << 6) /* 48-bit response */
#define ATMCI_CMDR_RSPTYP_136BIT (2 << 6) /* 136-bit response */
#define ATMCI_CMDR_SPCMD_INIT (1 << 8) /* Initialization command */
#define ATMCI_CMDR_SPCMD_SYNC (2 << 8) /* Synchronized command */
#define ATMCI_CMDR_SPCMD_INT (4 << 8) /* Interrupt command */
#define ATMCI_CMDR_SPCMD_INTRESP (5 << 8) /* Interrupt response */
#define ATMCI_CMDR_OPDCMD (1 << 11) /* Open Drain */
#define ATMCI_CMDR_MAXLAT_5CYC (0 << 12) /* Max latency 5 cycles */
#define ATMCI_CMDR_MAXLAT_64CYC (1 << 12) /* Max latency 64 cycles */
#define ATMCI_CMDR_START_XFER (1 << 16) /* Start data transfer */
#define ATMCI_CMDR_STOP_XFER (2 << 16) /* Stop data transfer */
#define ATMCI_CMDR_TRDIR_WRITE (0 << 18) /* Write data */
#define ATMCI_CMDR_TRDIR_READ (1 << 18) /* Read data */
#define ATMCI_CMDR_BLOCK (0 << 19) /* Single-block transfer */
#define ATMCI_CMDR_MULTI_BLOCK (1 << 19) /* Multi-block transfer */
#define ATMCI_CMDR_STREAM (2 << 19) /* MMC Stream transfer */
#define ATMCI_CMDR_SDIO_BYTE (4 << 19) /* SDIO Byte transfer */
#define ATMCI_CMDR_SDIO_BLOCK (5 << 19) /* SDIO Block transfer */
#define ATMCI_CMDR_SDIO_SUSPEND (1 << 24) /* SDIO Suspend Command */
#define ATMCI_CMDR_SDIO_RESUME (2 << 24) /* SDIO Resume Command */
#define ATMCI_BLKR 0x0018 /* Block */
#define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
#define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
#define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
#define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
#define ATMCI_RSPR 0x0020 /* Response 0 */
#define ATMCI_RSPR1 0x0024 /* Response 1 */
#define ATMCI_RSPR2 0x0028 /* Response 2 */
#define ATMCI_RSPR3 0x002c /* Response 3 */
#define ATMCI_RDR 0x0030 /* Receive Data */
#define ATMCI_TDR 0x0034 /* Transmit Data */
#define ATMCI_SR 0x0040 /* Status */
#define ATMCI_IER 0x0044 /* Interrupt Enable */
#define ATMCI_IDR 0x0048 /* Interrupt Disable */
#define ATMCI_IMR 0x004c /* Interrupt Mask */
#define ATMCI_CMDRDY BIT(0) /* Command Ready */
#define ATMCI_RXRDY BIT(1) /* Receiver Ready */
#define ATMCI_TXRDY BIT(2) /* Transmitter Ready */
#define ATMCI_BLKE BIT(3) /* Data Block Ended */
#define ATMCI_DTIP BIT(4) /* Data Transfer In Progress */
#define ATMCI_NOTBUSY BIT(5) /* Data Not Busy */
#define ATMCI_ENDRX BIT(6) /* End of RX Buffer */
#define ATMCI_ENDTX BIT(7) /* End of TX Buffer */
#define ATMCI_SDIOIRQA BIT(8) /* SDIO IRQ in slot A */
#define ATMCI_SDIOIRQB BIT(9) /* SDIO IRQ in slot B */
#define ATMCI_SDIOWAIT BIT(12) /* SDIO Read Wait Operation Status */
#define ATMCI_CSRCV BIT(13) /* CE-ATA Completion Signal Received */
#define ATMCI_RXBUFF BIT(14) /* RX Buffer Full */
#define ATMCI_TXBUFE BIT(15) /* TX Buffer Empty */
#define ATMCI_RINDE BIT(16) /* Response Index Error */
#define ATMCI_RDIRE BIT(17) /* Response Direction Error */
#define ATMCI_RCRCE BIT(18) /* Response CRC Error */
#define ATMCI_RENDE BIT(19) /* Response End Bit Error */
#define ATMCI_RTOE BIT(20) /* Response Time-Out Error */
#define ATMCI_DCRCE BIT(21) /* Data CRC Error */
#define ATMCI_DTOE BIT(22) /* Data Time-Out Error */
#define ATMCI_CSTOE BIT(23) /* Completion Signal Time-out Error */
#define ATMCI_BLKOVRE BIT(24) /* DMA Block Overrun Error */
#define ATMCI_DMADONE BIT(25) /* DMA Transfer Done */
#define ATMCI_FIFOEMPTY BIT(26) /* FIFO Empty Flag */
#define ATMCI_XFRDONE BIT(27) /* Transfer Done Flag */
#define ATMCI_ACKRCV BIT(28) /* Boot Operation Acknowledge Received */
#define ATMCI_ACKRCVE BIT(29) /* Boot Operation Acknowledge Error */
#define ATMCI_OVRE BIT(30) /* RX Overrun Error */
#define ATMCI_UNRE BIT(31) /* TX Underrun Error */
#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
#define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
#define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
#define ATMCI_DMAEN BIT(8) /* DMA Hardware Handshaking Enable */
#define ATMCI_CFG 0x0054 /* Configuration[2] */
#define ATMCI_CFG_FIFOMODE_1DATA BIT(0) /* MCI Internal FIFO control mode */
#define ATMCI_CFG_FERRCTRL_COR BIT(4) /* Flow Error flag reset control mode */
#define ATMCI_CFG_HSMODE BIT(8) /* High Speed Mode */
#define ATMCI_CFG_LSYNC BIT(12) /* Synchronize on the last block */
#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
#define ATMCI_WP_EN BIT(0) /* WP Enable */
#define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
#define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
#define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
#define ATMCI_VERSION 0x00FC /* Version */
#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
/* This is not including the FIFO Aperture on MCI2 */
#define ATMCI_REGS_SIZE 0x100
/* Register access macros */
#define atmci_readl(port, reg) \
__raw_readl((port)->regs + reg)
#define atmci_writel(port, reg, value) \
__raw_writel((value), (port)->regs + reg)
/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
#ifdef CONFIG_AVR32
# define ATMCI_PDC_CONNECTED 0
#else
# define ATMCI_PDC_CONNECTED 1
#endif
#define AUTOSUSPEND_DELAY 50
......@@ -584,6 +718,29 @@ static inline unsigned int atmci_get_version(struct atmel_mci *host)
return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}
/*
* Fix sconfig's burst size according to atmel MCI. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
* With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2,
* 8 -> 3, 16 -> 4.
*
* This can be done by finding most significant bit set.
*/
static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
unsigned int maxburst)
{
unsigned int version = atmci_get_version(host);
unsigned int offset = 2;
if (version >= 0x600)
offset = 1;
if (maxburst > 1)
return fls(maxburst) - offset;
else
return 0;
}
static void atmci_timeout_timer(unsigned long data)
{
struct atmel_mci *host;
......@@ -1034,11 +1191,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
maxburst = atmci_convert_chksize(host,
host->dma_conf.src_maxburst);
} else {
direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
maxburst = atmci_convert_chksize(host,
host->dma_conf.dst_maxburst);
}
if (host->caps.has_dma_conf_reg)
......
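Both atmci_convert_chksize() variants above turn a DMA maxburst value into the chunk-size field by locating the most significant set bit; IP version 0x600 and later just subtracts a smaller offset. A standalone check of the two mappings; fls() is emulated with a compiler builtin here, and the pre-0x600 version number is an arbitrary sample value, not taken from the driver:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): position of the MSB, 1-based. */
static int fls_emul(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int convert_chksize(unsigned int version, unsigned int maxburst)
{
        unsigned int offset = (version >= 0x600) ? 1 : 2;

        return (maxburst > 1) ? fls_emul(maxburst) - offset : 0;
}

int main(void)
{
        unsigned int bursts[] = { 1, 2, 4, 8, 16 };

        for (unsigned int i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
                printf("maxburst %2u -> chksize %u (pre-0x600), %u (0x600+)\n",
                       bursts[i],
                       convert_chksize(0x505, bursts[i]),
                       convert_chksize(0x600, bursts[i]));
        return 0;
}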
......@@ -29,8 +29,7 @@ static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)
static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc)
{
struct platform_device *pdev = container_of(mmc_dev(mmc),
struct platform_device, dev);
struct platform_device *pdev = to_platform_device(mmc_dev(mmc));
return cb710_pdev_to_slot(pdev);
}
......
......@@ -60,7 +60,7 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Get registers' physical base address */
host->phy_regs = (void *)(regs->start);
host->phy_regs = regs->start;
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
......
......@@ -239,20 +239,12 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
return 0;
}
/* Common capabilities of RK3288 SoC */
static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
MMC_CAP_RUNTIME_RESUME, /* emmc */
MMC_CAP_RUNTIME_RESUME, /* sdmmc */
MMC_CAP_RUNTIME_RESUME, /* sdio0 */
MMC_CAP_RUNTIME_RESUME, /* sdio1 */
};
static const struct dw_mci_drv_data rk2928_drv_data = {
.prepare_command = dw_mci_rockchip_prepare_command,
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
.caps = dw_mci_rk3288_dwmmc_caps,
.prepare_command = dw_mci_rockchip_prepare_command,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
......
......@@ -699,7 +699,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
int ret = 0;
/* Set external dma config: burst size, burst width */
cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
cfg.dst_addr = host->phy_regs + fifo_offset;
cfg.src_addr = cfg.dst_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
......@@ -1634,12 +1634,6 @@ static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
else
cmd->error = 0;
if (cmd->error) {
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
mdelay(20);
}
return cmd->error;
}
......@@ -2355,16 +2349,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
* DTO fix - version 2.10a and below, and only if internal DMA
* is configured.
*/
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
if (!pending &&
((mci_readl(host, STATUS) >> 17) & 0x1fff))
pending |= SDMMC_INT_DATA_OVER;
}
if (pending) {
/* Check volt switch first, since it can look like an error */
if ((host->state == STATE_SENDING_CMD11) &&
......@@ -3165,9 +3149,6 @@ int dw_mci_probe(struct dw_mci *host)
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
return 0;
err_dmaunmap:
......
......@@ -972,7 +972,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
data->bytes_xfered = data->blocks * data->blksz;
} else {
dev_err(host->dev, "interrupt events: %x\n", events);
dev_dbg(host->dev, "interrupt events: %x\n", events);
msdc_reset_hw(host);
host->error |= REQ_DAT_ERR;
data->bytes_xfered = 0;
......@@ -982,10 +982,10 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
else if (events & MSDC_INT_DATCRCERR)
data->error = -EILSEQ;
dev_err(host->dev, "%s: cmd=%d; blocks=%d",
dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
__func__, mrq->cmd->opcode, data->blocks);
dev_err(host->dev, "data_error=%d xfer_size=%d\n",
(int)data->error, data->bytes_xfered);
dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
(int)data->error, data->bytes_xfered);
}
msdc_data_xfer_next(host, mrq, data);
......@@ -1543,7 +1543,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->f_min = host->src_clk_freq / (4 * 255);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps |= MMC_CAP_RUNTIME_RESUME;
/* MMC core transfer sizes tunable parameters */
mmc->max_segs = MAX_BD_NUM;
mmc->max_seg_size = BDMA_DESC_BUFLEN;
......
......@@ -20,15 +20,12 @@
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
#include <linux/platform_data/mmc-mvsdio.h>
#include "mvsdio.h"
......@@ -704,6 +701,10 @@ static int mvsd_probe(struct platform_device *pdev)
struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
......@@ -727,8 +728,12 @@ static int mvsd_probe(struct platform_device *pdev)
* fixed rate clock).
*/
host->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "no clock associated\n");
ret = -EINVAL;
goto out;
}
clk_prepare_enable(host->clk);
mmc->ops = &mvsd_ops;
......@@ -744,45 +749,10 @@ static int mvsd_probe(struct platform_device *pdev)
mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
if (np) {
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
ret = -EINVAL;
goto out;
}
host->base_clock = clk_get_rate(host->clk) / 2;
ret = mmc_of_parse(mmc);
if (ret < 0)
goto out;
} else {
const struct mvsdio_platform_data *mvsd_data;
mvsd_data = pdev->dev.platform_data;
if (!mvsd_data) {
ret = -ENXIO;
goto out;
}
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ |
MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
host->base_clock = mvsd_data->clock / 2;
/* GPIO 0 regarded as invalid for backward compatibility */
if (mvsd_data->gpio_card_detect &&
gpio_is_valid(mvsd_data->gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc,
mvsd_data->gpio_card_detect,
0);
if (ret)
goto out;
} else {
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
if (mvsd_data->gpio_write_protect &&
gpio_is_valid(mvsd_data->gpio_write_protect))
mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect);
}
host->base_clock = clk_get_rate(host->clk) / 2;
ret = mmc_of_parse(mmc);
if (ret < 0)
goto out;
if (maxfreq)
mmc->f_max = maxfreq;
......
......@@ -55,8 +55,8 @@ static int of_mmc_spi_init(struct device *dev,
{
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0,
dev_name(dev), mmc);
return request_threaded_irq(oms->detect_irq, NULL, irqhandler,
IRQF_ONESHOT, dev_name(dev), mmc);
}
static void of_mmc_spi_exit(struct device *dev, void *mmc)
......
......@@ -2250,10 +2250,8 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
if (host->rx_chan)
dma_release_channel(host->rx_chan);
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
......
......@@ -76,6 +76,7 @@
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
/* pinctrl state */
......@@ -489,9 +490,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
if (imx_data->boarddata.tuning_step)
if (imx_data->boarddata.tuning_step) {
tuning_ctrl &= ~ESDHC_TUNING_STEP_MASK;
tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
}
writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
......
......@@ -21,6 +21,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
......@@ -51,6 +53,60 @@ static const struct of_device_id sdhci_at91_dt_match[] = {
{}
};
#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = pltfm_host->priv;
int ret;
ret = sdhci_runtime_suspend_host(host);
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->hclock);
clk_disable_unprepare(priv->mainck);
return ret;
}
static int sdhci_at91_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = pltfm_host->priv;
int ret;
ret = clk_prepare_enable(priv->mainck);
if (ret) {
dev_err(dev, "can't enable mainck\n");
return ret;
}
ret = clk_prepare_enable(priv->hclock);
if (ret) {
dev_err(dev, "can't enable hclock\n");
return ret;
}
ret = clk_prepare_enable(priv->gck);
if (ret) {
dev_err(dev, "can't enable gck\n");
return ret;
}
return sdhci_runtime_resume_host(host);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend,
sdhci_at91_runtime_resume,
NULL)
};
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
......@@ -144,12 +200,23 @@ static int sdhci_at91_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
ret = sdhci_add_host(host);
if (ret)
goto clocks_disable_unprepare;
goto pm_runtime_disable;
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
clocks_disable_unprepare:
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->mainck);
......@@ -165,6 +232,10 @@ static int sdhci_at91_remove(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = pltfm_host->priv;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(priv->gck);
......@@ -178,7 +249,7 @@ static struct platform_driver sdhci_at91_driver = {
.driver = {
.name = "sdhci-at91",
.of_match_table = sdhci_at91_dt_match,
.pm = SDHCI_PLTFM_PMOPS,
.pm = &sdhci_at91_dev_pm_ops,
},
.probe = sdhci_at91_probe,
.remove = sdhci_at91_remove,
......
......@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device_node *np;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
np = pdev->dev.of_node;
......@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pltfm_host = sdhci_priv(host);
esdhc = pltfm_host->priv;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
......
......@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
if (sdhci_pci_spt_drive_strength > 0)
drive_strength = sdhci_pci_spt_drive_strength & 0xf;
else
drive_strength = 1; /* 33-ohm */
drive_strength = 0; /* Default 50-ohm */
if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
drive_strength = 0; /* Default 50-ohm */
......@@ -1464,7 +1464,7 @@ static int sdhci_pci_resume(struct device *dev)
static int sdhci_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
......@@ -1500,7 +1500,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
static int sdhci_pci_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
......
......@@ -104,7 +104,8 @@ void sdhci_get_of_property(struct platform_device *pdev)
if (of_find_property(np, "keep-power-in-suspend", NULL))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
if (of_find_property(np, "enable-sdio-wakeup", NULL))
if (of_property_read_bool(np, "wakeup-source") ||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}
#else
......
......@@ -22,12 +22,20 @@
#include <linux/of_device.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
#include "sdhci-pltfm.h"
/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
......@@ -37,9 +45,9 @@
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
#define NVQUIRK_DISABLE_SDR50 BIT(3)
#define NVQUIRK_DISABLE_SDR104 BIT(4)
#define NVQUIRK_DISABLE_DDR50 BIT(5)
#define NVQUIRK_ENABLE_SDR50 BIT(3)
#define NVQUIRK_ENABLE_SDR104 BIT(4)
#define NVQUIRK_ENABLE_DDR50 BIT(5)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
......@@ -49,6 +57,7 @@ struct sdhci_tegra_soc_data {
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
bool ddr_signaling;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
......@@ -124,25 +133,33 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl;
u32 misc_ctrl, clk_ctrl;
sdhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
/* Erratum: Enable SDHCI spec v3.00 support */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
/* Don't advertise UHS modes which aren't supported yet */
if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50)
misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50)
misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104)
misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
/* Advertise UHS modes as supported by host */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
tegra_host->ddr_signaling = false;
}
static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
......@@ -164,15 +181,99 @@ static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
unsigned long host_clk;
if (!clock)
return;
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
host->max_clk = clk_get_rate(pltfm_host->clk);
return sdhci_set_clock(host, clock);
}
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
unsigned timing)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = pltfm_host->priv;
if (timing == MMC_TIMING_UHS_DDR50)
tegra_host->ddr_signaling = true;
return sdhci_set_uhs_signaling(host, timing);
}
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
/*
* DDR modes require the host to run at double the card frequency, so
* the maximum rate we can support is half of the module input clock.
*/
return clk_round_rate(pltfm_host->clk, UINT_MAX) / 2;
}
static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
u32 reg;
reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
}
static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
unsigned int min, max;
/*
* Start the search for the minimum tap value at 10, as smaller values
* may wrongly be reported as working but fail at higher speeds,
* according to the TRM.
*/
min = 10;
while (min < 255) {
tegra_sdhci_set_tap(host, min);
if (!mmc_send_tuning(host->mmc, opcode, NULL))
break;
min++;
}
/* Find the maximum tap value that still passes. */
max = min + 1;
while (max < 255) {
tegra_sdhci_set_tap(host, max);
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
max--;
break;
}
max++;
}
/* The TRM states the ideal tap value is at 75% in the passing range. */
tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
return mmc_send_tuning(host->mmc, opcode, NULL);
}
static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = sdhci_set_clock,
.set_clock = tegra_sdhci_set_clock,
.set_bus_width = tegra_sdhci_set_bus_width,
.reset = tegra_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.get_max_clock = tegra_sdhci_get_max_clock,
};
static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
......@@ -184,7 +285,7 @@ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra20 = {
static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
.pdata = &sdhci_tegra20_pdata,
.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
NVQUIRK_ENABLE_BLOCK_GAP_DET,
......@@ -197,14 +298,15 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &tegra_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
NVQUIRK_DISABLE_SDR50 |
NVQUIRK_DISABLE_SDR104,
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104,
};
static const struct sdhci_ops tegra114_sdhci_ops = {
......@@ -212,11 +314,12 @@ static const struct sdhci_ops tegra114_sdhci_ops = {
.read_w = tegra_sdhci_readw,
.write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
.set_clock = sdhci_set_clock,
.set_clock = tegra_sdhci_set_clock,
.set_bus_width = tegra_sdhci_set_bus_width,
.reset = tegra_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.get_max_clock = tegra_sdhci_get_max_clock,
};
static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
......@@ -226,17 +329,34 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &tegra114_sdhci_ops,
};
static struct sdhci_tegra_soc_data soc_data_tegra114 = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
.nvquirks = NVQUIRK_DISABLE_SDR50 |
NVQUIRK_DISABLE_DDR50 |
NVQUIRK_DISABLE_SDR104,
.nvquirks = NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_DDR50 |
NVQUIRK_ENABLE_SDR104,
};
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &tegra114_sdhci_ops,
};
static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
.pdata = &sdhci_tegra210_pdata,
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
......@@ -271,6 +391,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
rc = -ENOMEM;
goto err_alloc_tegra_host;
}
tegra_host->ddr_signaling = false;
tegra_host->soc_data = soc_data;
pltfm_host->priv = tegra_host;
......@@ -278,6 +399,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (rc)
goto err_parse_dt;
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
GPIOD_OUT_HIGH);
if (IS_ERR(tegra_host->power_gpio)) {
......
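Two numeric rules in the Tegra hunks above are easy to sanity-check outside the driver: tegra_sdhci_set_clock() runs the module clock at twice the bus clock for DDR timing, and tegra_sdhci_execute_tuning() places the final tap at 75% of the passing window found between taps 10 and 255. A small arithmetic sketch; the window endpoints are made-up sample values, not measurements:

#include <stdio.h>

int main(void)
{
        /* DDR50: bus clock 50 MHz -> module clock doubled, as in set_clock(). */
        unsigned int bus_hz = 50000000;
        int ddr_signaling = 1;
        unsigned long host_clk = ddr_signaling ? 2UL * bus_hz : bus_hz;

        /* Hypothetical passing window from the tuning loop. */
        unsigned int min_tap = 10, max_tap = 130;
        unsigned int tap = min_tap + ((max_tap - min_tap) * 3 / 4);

        printf("module clock: %lu Hz\n", host_clk);      /* 100000000 */
        printf("chosen tap:   %u\n", tap);               /* 10 + 90 = 100 */
        return 0;
}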
......@@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
host->align_buffer, host->align_buffer_sz, direction);
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
goto fail;
BUG_ON(host->align_addr & host->align_mask);
BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
......@@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* the (up to three) bytes that screw up the
* alignment.
*/
offset = (host->align_sz - (addr & host->align_mask)) &
host->align_mask;
offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
SDHCI_ADMA2_MASK;
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
......@@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(offset > 65536);
align += host->align_sz;
align_addr += host->align_sz;
align += SDHCI_ADMA2_ALIGN;
align_addr += SDHCI_ADMA2_ALIGN;
desc += host->desc_sz;
......@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(len > 65536);
/* tran, valid */
sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
desc += host->desc_sz;
if (len) {
/* tran, valid */
sdhci_adma_write_desc(host, desc, addr, len,
ADMA2_TRAN_VALID);
desc += host->desc_sz;
}
/*
* If this triggers then we have a calculation bug
......@@ -608,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
/* Do a quick scan of the SG list for any unaligned mappings */
has_unaligned = false;
for_each_sg(data->sg, sg, host->sg_count, i)
if (sg_dma_address(sg) & host->align_mask) {
if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
has_unaligned = true;
break;
}
......@@ -620,15 +623,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
align = host->align_buffer;
for_each_sg(data->sg, sg, host->sg_count, i) {
if (sg_dma_address(sg) & host->align_mask) {
size = host->align_sz -
(sg_dma_address(sg) & host->align_mask);
if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
size = SDHCI_ADMA2_ALIGN -
(sg_dma_address(sg) & SDHCI_ADMA2_MASK);
buffer = sdhci_kmap_atomic(sg, &flags);
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
align += host->align_sz;
align += SDHCI_ADMA2_ALIGN;
}
}
}
......@@ -768,8 +771,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
if (unlikely(broken)) {
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->length & 0x3) {
DBG("Reverting to PIO because of "
"transfer size (%d)\n",
DBG("Reverting to PIO because of transfer size (%d)\n",
sg->length);
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
......@@ -803,8 +805,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
if (unlikely(broken)) {
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 0x3) {
DBG("Reverting to PIO because of "
"bad alignment\n");
DBG("Reverting to PIO because of bad alignment\n");
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
}
......@@ -1016,8 +1017,8 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
if (timeout == 0) {
pr_err("%s: Controller never released "
"inhibit bit(s).\n", mmc_hostname(host->mmc));
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
cmd->error = -EIO;
tasklet_schedule(&host->finish_tasklet);
......@@ -1254,8 +1255,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
pr_err("%s: Internal clock never "
"stabilised.\n", mmc_hostname(host->mmc));
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
return;
}
......@@ -1274,19 +1275,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
struct mmc_host *mmc = host->mmc;
u8 pwr = 0;
if (!IS_ERR(mmc->supply.vmmc)) {
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
if (mode != MMC_POWER_OFF)
sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
else
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
return;
}
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
......@@ -1301,7 +1289,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
pwr = SDHCI_POWER_330;
break;
default:
BUG();
WARN(1, "%s: Invalid vdd %#x\n",
mmc_hostname(host->mmc), vdd);
break;
}
}
......@@ -1345,6 +1335,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
}
if (!IS_ERR(mmc->supply.vmmc)) {
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
}
}
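
The reordered sdhci_set_power() above programs the power register first and only then calls into the (potentially sleeping) regulator framework, dropping the host spinlock around that call. A standalone pthread sketch of the same lock-drop pattern, with set_regulator() as a hypothetical stand-in for mmc_regulator_set_ocr():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for mmc_regulator_set_ocr(): it may sleep, so it
 * must never run with host_lock held. */
static void set_regulator(void)
{
	usleep(1000);
}

static void set_power(void)
{
	pthread_mutex_lock(&host_lock);
	/* ... write the controller's power register under the lock ... */

	pthread_mutex_unlock(&host_lock);	/* drop lock for sleeping call */
	set_regulator();
	pthread_mutex_lock(&host_lock);

	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	set_power();
	printf("power sequence done\n");
	return 0;
}
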
/*****************************************************************************\
......@@ -1540,8 +1536,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
else {
pr_warn("%s: invalid driver type, default to "
"driver type B\n", mmc_hostname(mmc));
pr_warn("%s: invalid driver type, default to driver type B\n",
mmc_hostname(mmc));
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
}
......@@ -2015,10 +2011,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
spin_lock_irqsave(&host->lock, flags);
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for "
"Buffer Read Ready interrupt during tuning "
"procedure, falling back to fixed sampling "
"clock\n");
pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
......@@ -2046,9 +2039,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
pr_info(DRIVER_NAME ": Tuning procedure"
" failed, falling back to fixed sampling"
" clock\n");
pr_info(DRIVER_NAME ": Tuning procedure failed, falling back to fixed sampling clock\n");
err = -EIO;
}
......@@ -2293,8 +2284,8 @@ static void sdhci_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
pr_err("%s: Timeout waiting for hardware "
"interrupt.\n", mmc_hostname(host->mmc));
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
if (host->data) {
......@@ -2325,9 +2316,8 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
BUG_ON(intmask == 0);
if (!host->cmd) {
pr_err("%s: Got command interrupt 0x%08x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
return;
}
......@@ -2356,8 +2346,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
*/
if (host->cmd->flags & MMC_RSP_BUSY) {
if (host->cmd->data)
DBG("Cannot wait for busy signal when also "
"doing a data transfer");
DBG("Cannot wait for busy signal when also doing a data transfer");
else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
&& !host->busy_handle) {
/* Mark that command complete before busy is ended */
......@@ -2451,9 +2440,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
}
pr_err("%s: Got data interrupt 0x%08x even "
"though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
return;
......@@ -2760,7 +2748,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
if (host->runtime_suspended || host->bus_on)
if (host->bus_on)
return;
host->bus_on = true;
pm_runtime_get_noresume(host->mmc->parent);
......@@ -2768,7 +2756,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
if (host->runtime_suspended || !host->bus_on)
if (!host->bus_on)
return;
host->bus_on = false;
pm_runtime_put_noidle(host->mmc->parent);
......@@ -2896,9 +2884,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
pr_err("%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
mmc_hostname(mmc), host->version);
}
caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
......@@ -2967,24 +2954,17 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & SDHCI_USE_64_BIT_DMA) {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_64_DESC_SZ;
host->align_buffer_sz = SDHCI_MAX_SEGS *
SDHCI_ADMA2_64_ALIGN;
host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
host->align_sz = SDHCI_ADMA2_64_ALIGN;
host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
} else {
host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
SDHCI_ADMA2_32_DESC_SZ;
host->align_buffer_sz = SDHCI_MAX_SEGS *
SDHCI_ADMA2_32_ALIGN;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
host->align_sz = SDHCI_ADMA2_32_ALIGN;
host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
}
host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
host->adma_table_sz,
&host->adma_addr,
GFP_KERNEL);
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
if (host->adma_table)
......@@ -2998,7 +2978,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
host->adma_table = NULL;
host->align_buffer = NULL;
} else if (host->adma_addr & host->align_mask) {
} else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
......@@ -3031,8 +3011,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
pr_err("%s: Hardware doesn't specify base clock "
"frequency.\n", mmc_hostname(mmc));
pr_err("%s: Hardware doesn't specify base clock frequency.\n",
mmc_hostname(mmc));
return -ENODEV;
}
host->max_clk = host->ops->get_max_clock(host);
......@@ -3294,8 +3274,8 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
pr_err("%s: Hardware doesn't report any "
"support voltages.\n", mmc_hostname(mmc));
pr_err("%s: Hardware doesn't report any support voltages.\n",
mmc_hostname(mmc));
return -ENODEV;
}
......
......@@ -272,22 +272,27 @@
/* ADMA2 32-bit DMA descriptor size */
#define SDHCI_ADMA2_32_DESC_SZ 8
/* ADMA2 32-bit DMA alignment */
#define SDHCI_ADMA2_32_ALIGN 4
/* ADMA2 32-bit descriptor */
struct sdhci_adma2_32_desc {
__le16 cmd;
__le16 len;
__le32 addr;
} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
} __packed __aligned(4);
/* ADMA2 data alignment */
#define SDHCI_ADMA2_ALIGN 4
#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
/*
* ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8 byte
* alignment for the descriptor table even in 32-bit DMA mode. Memory
* allocation is at least 8 byte aligned anyway, so just stipulate 8 always.
*/
#define SDHCI_ADMA2_DESC_ALIGN 8
/* ADMA2 64-bit DMA descriptor size */
#define SDHCI_ADMA2_64_DESC_SZ 12
/* ADMA2 64-bit DMA alignment */
#define SDHCI_ADMA2_64_ALIGN 8
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
* aligned.
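
The sdhci.h hunk above replaces the two per-mode alignment macros with a single SDHCI_ADMA2_ALIGN for data and a stricter SDHCI_ADMA2_DESC_ALIGN for the descriptor table, while keeping the packed 8-byte 32-bit descriptor. A small userspace sketch of that descriptor layout, using GCC attributes and uint*_t types as stand-ins for the kernel's __packed/__aligned and __le16/__le32 helpers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct sdhci_adma2_32_desc from the hunk above (little-endian
 * build assumed). */
struct adma2_32_desc {
	uint16_t cmd;
	uint16_t len;
	uint32_t addr;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("descriptor size %zu, alignment %zu\n",
	       sizeof(struct adma2_32_desc),
	       _Alignof(struct adma2_32_desc));	/* expect 8 and 4 */
	return 0;
}
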
......@@ -482,8 +487,6 @@ struct sdhci_host {
dma_addr_t align_addr; /* Mapped bounce buffer */
unsigned int desc_sz; /* ADMA descriptor size */
unsigned int align_sz; /* ADMA alignment */
unsigned int align_mask; /* ADMA alignment mask */
struct tasklet_struct finish_tasklet; /* Tasklet structures */
......
......@@ -397,38 +397,26 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
}
static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata,
enum dma_transfer_direction direction)
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
struct dma_slave_config cfg = { 0, };
struct dma_chan *chan;
void *slave_data = NULL;
struct resource *res;
struct device *dev = sh_mmcif_host_to_dev(host);
dma_cap_mask_t mask;
int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
if (slave_id <= 0)
return NULL;
if (pdata)
slave_data = direction == DMA_MEM_TO_DEV ?
(void *)pdata->slave_id_tx :
(void *)pdata->slave_id_rx;
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
slave_data, dev,
direction == DMA_MEM_TO_DEV ? "tx" : "rx");
dev_dbg(dev, "%s: %s: got channel %p\n", __func__,
direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}
if (!chan)
return NULL;
static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
struct dma_chan *chan,
enum dma_transfer_direction direction)
{
struct resource *res;
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
......@@ -439,38 +427,42 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
ret = dmaengine_slave_config(chan, &cfg);
if (ret < 0) {
dma_release_channel(chan);
return NULL;
}
return chan;
return dmaengine_slave_config(chan, &cfg);
}
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
struct device *dev = sh_mmcif_host_to_dev(host);
host->dma_active = false;
if (pdata) {
if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
return;
} else if (!dev->of_node) {
return;
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
struct sh_mmcif_plat_data *pdata = dev->platform_data;
host->chan_tx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_tx);
host->chan_rx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_rx);
} else {
host->chan_tx = dma_request_slave_channel(dev, "tx");
host->chan_rx = dma_request_slave_channel(dev, "rx");
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
/* We can only either use DMA for both Tx and Rx or not use it at all */
host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
if (!host->chan_tx)
return;
if (!host->chan_tx || !host->chan_rx ||
sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
goto error;
host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
if (!host->chan_rx) {
return;
error:
if (host->chan_tx)
dma_release_channel(host->chan_tx);
host->chan_tx = NULL;
}
if (host->chan_rx)
dma_release_channel(host->chan_rx);
host->chan_tx = host->chan_rx = NULL;
}
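
The reworked sh_mmcif_request_dma() above keeps the driver's all-or-nothing policy: both channels must be acquired and configured, otherwise everything is released and the driver stays on PIO. A standalone sketch of that policy, with acquire_chan()/config_chan()/release_chan() as hypothetical stand-ins rather than kernel APIs:

#include <stdio.h>
#include <string.h>

struct chan { const char *name; };

static struct chan tx_chan = { "tx" };

/* Pretend only the "tx" channel is available, to exercise the error path. */
static struct chan *acquire_chan(const char *name)
{
	return strcmp(name, "tx") ? NULL : &tx_chan;
}

static int config_chan(struct chan *c)
{
	return c ? 0 : -1;
}

static void release_chan(struct chan *c)
{
	printf("released %s\n", c->name);
}

/* All-or-nothing policy mirrored from sh_mmcif_request_dma() above. */
static void setup_dma(struct chan **tx, struct chan **rx)
{
	*tx = acquire_chan("tx");
	*rx = acquire_chan("rx");

	if (!*tx || !*rx ||
	    config_chan(*tx) || config_chan(*rx))
		goto error;
	return;

error:
	if (*tx)
		release_chan(*tx);
	if (*rx)
		release_chan(*rx);
	*tx = *rx = NULL;		/* caller falls back to PIO */
}

int main(void)
{
	struct chan *tx, *rx;

	setup_dma(&tx, &rx);
	printf("DMA %s\n", tx && rx ? "enabled" : "unavailable, using PIO");
	return 0;
}
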
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
......@@ -1102,7 +1094,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode == MMC_POWER_UP) {
if (!host->card_present) {
/* See if we also get DMA */
sh_mmcif_request_dma(host, dev->platform_data);
sh_mmcif_request_dma(host);
host->card_present = true;
}
sh_mmcif_set_power(host, ios);
......
......@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
struct scatterlist *sg = host->sg ?: data->sg;
struct scatterlist *sg;
dev_warn(mmc_dev(host->mmc),
"%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
......@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
case USDHI6_WAIT_FOR_MWRITE:
case USDHI6_WAIT_FOR_READ:
case USDHI6_WAIT_FOR_WRITE:
sg = host->sg ?: data->sg;
dev_dbg(mmc_dev(host->mmc),
"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
......
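
The usdhi6rol0 fix above stops dereferencing data->sg before knowing that the timed-out request actually has a data phase; the scatterlist is only looked up inside the data-transfer states. A tiny sketch of the guard, with the request/data structs reduced to hypothetical stand-ins:

#include <stdio.h>
#include <stddef.h>

struct data { int nr_sg; };
struct request { struct data *data; };	/* data is NULL for plain commands */

static void timeout_dump(const struct request *rq)
{
	/* Only touch rq->data once we know the request carries data; the
	 * old code computed the scatterlist unconditionally. */
	if (rq->data)
		printf("data timeout, %d segment(s)\n", rq->data->nr_sg);
	else
		printf("command timeout, no data phase\n");
}

int main(void)
{
	struct request cmd_only = { .data = NULL };
	struct data d = { .nr_sg = 3 };
	struct request with_data = { .data = &d };

	timeout_dump(&cmd_only);
	timeout_dump(&with_data);
	return 0;
}
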
......@@ -172,7 +172,7 @@ struct dw_mci {
/* For edmac */
struct dw_mci_dma_slave *dms;
/* Registers's physical base address */
void *phy_regs;
resource_size_t phy_regs;
u32 cmd_status;
u32 data_status;
......@@ -235,16 +235,10 @@ struct dw_mci_dma_ops {
};
/* IP Quirks/flags. */
/* DTO fix for command transmission with IDMAC configured */
#define DW_MCI_QUIRK_IDMAC_DTO BIT(0)
/* delay needed between retries on some 2.11a implementations */
#define DW_MCI_QUIRK_RETRY_DELAY BIT(1)
/* High Speed Capable - Supports HS cards (up to 50MHz) */
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0)
/* Timer for broken data transfer over scheme */
#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
#define DW_MCI_QUIRK_BROKEN_DTO BIT(1)
struct dma_pdata;
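
With the unused quirks removed, dw_mmc is left with two flag bits, renumbered from zero. A short sketch of how such bit flags are declared and tested; BIT() is re-defined locally here, and the quirks value is invented for the demo:

#include <stdio.h>

#define BIT(n)	(1U << (n))

#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION	BIT(0)
#define DW_MCI_QUIRK_BROKEN_DTO			BIT(1)

int main(void)
{
	unsigned int quirks = DW_MCI_QUIRK_BROKEN_DTO;	/* demo value */

	if (quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		printf("card detect is unreliable, polling instead\n");
	if (quirks & DW_MCI_QUIRK_BROKEN_DTO)
		printf("arming the software DTO timer\n");
	return 0;
}
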
......
......@@ -212,7 +212,9 @@ struct mmc_host {
u32 ocr_avail_sdio; /* SDIO-specific OCR */
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
#endif
u32 max_current_330;
u32 max_current_300;
u32 max_current_180;
......@@ -259,7 +261,6 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
......@@ -289,6 +290,7 @@ struct mmc_host {
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */
mmc_pm_flag_t pm_caps; /* supported pm features */
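
MMC_CAP2_NO_SDIO lets a host declare up front that SDIO commands must not be issued during card initialization. A userspace sketch of setting and testing such a caps2 flag; the struct below is a minimal stand-in, not the real struct mmc_host:

#include <stdio.h>

#define MMC_CAP2_NO_SDIO	(1 << 19)	/* value from the hunk above */

/* Minimal stand-in for struct mmc_host, just enough for the caps2 flag. */
struct host_stub {
	unsigned int caps2;
};

int main(void)
{
	struct host_stub host = { .caps2 = 0 };

	/* A controller that cannot handle SDIO commands opts out up front. */
	host.caps2 |= MMC_CAP2_NO_SDIO;

	printf("SDIO probing %s\n",
	       host.caps2 & MMC_CAP2_NO_SDIO ? "skipped" : "attempted");
	return 0;
}
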
......@@ -434,8 +436,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
int mmc_regulator_get_supply(struct mmc_host *mmc);
int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *);
static inline int mmc_card_is_removable(struct mmc_host *host)
{
return !(host->caps & MMC_CAP_NONREMOVABLE);
......
/*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef __MMC_MVSDIO_H
#define __MMC_MVSDIO_H
#include <linux/mbus.h>
struct mvsdio_platform_data {
unsigned int clock;
int gpio_card_detect;
int gpio_write_protect;
};
#endif
......@@ -1534,8 +1534,7 @@ config FAIL_IO_TIMEOUT
config FAIL_MMC_REQUEST
bool "Fault-injection capability for MMC IO"
select DEBUG_FS
depends on FAULT_INJECTION && MMC
depends on FAULT_INJECTION_DEBUG_FS && MMC
help
Provide fault-injection capability for MMC IO.
This will make the mmc core return data errors. This is
......