Commit cd76e5c5 authored by Jan Glauber, committed by Ulf Hansson

mmc: cavium: Add scatter-gather DMA support

Add support for the scatter-gather DMA available in the
ThunderX MMC units. Up to 16 DMA requests can be processed
together.
Signed-off-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Parent 166bac38
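The heart of the patch is the FIFO command word: each scatter-gather segment is described to the hardware by one 64-bit write that packs the transfer direction, interrupt control, endianness, and segment size. The stand-alone C sketch below mirrors how prepare_dma_sg() builds that word and how finish_dma_sg() later reads back the FIFO's COUNT field. The bit positions come from the MIO_EMM_DMA_FIFO_* definitions this patch adds to cavium.h; the field_prep()/field_get() helpers are simplified stand-ins for the kernel's FIELD_PREP/FIELD_GET, and all sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's bit helpers (linux/bits.h, linux/bitfield.h) */
#define BIT_ULL(n)        (1ULL << (n))
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Bit positions as added to cavium.h by this patch */
#define MIO_EMM_DMA_FIFO_CFG_COUNT  GENMASK_ULL(4, 0)
#define MIO_EMM_DMA_FIFO_CMD_RW     BIT_ULL(62)
#define MIO_EMM_DMA_FIFO_CMD_INTDIS BIT_ULL(60)
#define MIO_EMM_DMA_FIFO_CMD_ENDIAN BIT_ULL(56)
#define MIO_EMM_DMA_FIFO_CMD_SIZE   GENMASK_ULL(55, 36)

/* Simplified FIELD_PREP: shift a value up to the mask's lowest set bit.
 * __builtin_ctzll is a GCC/Clang builtin (count trailing zeros). */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

/* Simplified FIELD_GET: extract a field back out of a register value */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	/* Illustrative values: one 4 KiB write segment, last of its list */
	uint32_t seg_len = 4096;	/* segment length, a multiple of 8 bytes */
	int rw = 1;			/* 1 = write, 0 = read */
	int last = 1;			/* last element of the sg list */

	uint64_t fifo_cmd = field_prep(MIO_EMM_DMA_FIFO_CMD_RW, rw);
	/* Interrupt only on the last element: INTDIS=0 there, 1 otherwise */
	fifo_cmd |= field_prep(MIO_EMM_DMA_FIFO_CMD_INTDIS, last ? 0 : 1);
	fifo_cmd |= field_prep(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
	/* SIZE is encoded as the number of 64-bit words, minus one */
	fifo_cmd |= field_prep(MIO_EMM_DMA_FIFO_CMD_SIZE, seg_len / 8 - 1);

	printf("FIFO command word: 0x%016llx\n",
	       (unsigned long long)fifo_cmd);

	/* finish_dma_sg() checks for leftover work the same way: it reads
	 * MIO_EMM_DMA_FIFO_CFG and extracts COUNT. Pretend two requests
	 * are still pending here. */
	uint64_t fifo_cfg = 2;
	printf("pending requests:  %llu\n",
	       (unsigned long long)field_get(MIO_EMM_DMA_FIFO_CFG_COUNT,
					     fifo_cfg));
	return 0;
}

In the driver itself, the packed word is simply written to MIO_EMM_DMA_FIFO_CMD, which also pushes the previously written address register into the FIFO and increments COUNT, as the patch's comments note below.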
drivers/mmc/host/cavium-thunderx.c

@@ -82,7 +82,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
 	host->dma_base = host->base;
 	host->reg_off = 0x2000;
-	host->reg_off_dma = 0x180;
+	host->reg_off_dma = 0x160;
 
 	host->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(host->clk))
@@ -101,6 +101,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
 	host->release_bus = thunder_mmc_release_bus;
 	host->int_enable = thunder_mmc_int_enable;
 
+	host->use_sg = true;
 	host->big_dma_addr = true;
 	host->need_irq_handler_lock = true;
 	host->last_slot = -1;
@@ -115,6 +116,8 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
 	 */
 	writeq(127, host->base + MIO_EMM_INT_EN(host));
 	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
+	/* Clear DMA FIFO */
+	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
 
 	ret = thunder_mmc_register_interrupts(host, pdev);
 	if (ret)
drivers/mmc/host/cavium.c

@@ -377,8 +377,31 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 	return 1;
 }
 
+static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+	u64 fifo_cfg;
+	int count;
+
+	/* Check if there are any pending requests left */
+	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
+	if (count)
+		dev_err(host->dev, "%u requests still pending\n", count);
+
+	data->bytes_xfered = data->blocks * data->blksz;
+	data->error = 0;
+
+	/* Clear and disable FIFO */
+	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+	return 1;
+}
+
 static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 {
-	return finish_dma_single(host, data);
+	if (host->use_sg && data->sg_len > 1)
+		return finish_dma_sg(host, data);
+	else
+		return finish_dma_single(host, data);
 }
@@ -522,8 +545,80 @@ static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
 	return addr;
 }
 
+/*
+ * Queue complete sg list into the FIFO.
+ * Returns 0 on error, 1 otherwise.
+ */
+static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+	struct scatterlist *sg;
+	u64 fifo_cmd, addr;
+	int count, i, rw;
+
+	count = dma_map_sg(host->dev, data->sg, data->sg_len,
+			   get_dma_dir(data));
+	if (!count)
+		return 0;
+	if (count > 16)
+		goto error;
+
+	/* Enable FIFO by removing CLR bit */
+	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+
+	for_each_sg(data->sg, sg, count, i) {
+		/* Program DMA address */
+		addr = sg_dma_address(sg);
+		if (addr & 7)
+			goto error;
+		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
+
+		/*
+		 * If we have scatter-gather support we also have an extra
+		 * register for the DMA addr, so no need to check
+		 * host->big_dma_addr here.
+		 */
+		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
+		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
+
+		/* enable interrupts on the last element */
+		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
+				       (i + 1 == count) ? 0 : 1);
+
+#ifdef __LITTLE_ENDIAN
+		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
+#endif
+		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
+				       sg_dma_len(sg) / 8 - 1);
+		/*
+		 * The write copies the address and the command to the FIFO
+		 * and increments the FIFO's COUNT field.
+		 */
+		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
+		pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
+			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
+	}
+
+	/*
+	 * In contrast to prepare_dma_single we don't return the
+	 * address here, as it would not make sense for scatter-gather.
+	 * The dma fixup is only required on models that don't support
+	 * scatter-gather, so that is not a problem.
+	 */
+	return 1;
+
+error:
+	WARN_ON_ONCE(1);
+	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+	/* Disable FIFO */
+	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+	return 0;
+}
+
 static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
 {
-	return prepare_dma_single(host, data);
+	if (host->use_sg && data->sg_len > 1)
+		return prepare_dma_sg(host, data);
+	else
+		return prepare_dma_single(host, data);
 }
@@ -940,6 +1035,9 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
 
-	mmc->max_segs = 1;
+	if (host->use_sg)
+		mmc->max_segs = 16;
+	else
+		mmc->max_segs = 1;
 
 	/* DMA size field can address up to 8 MB */
drivers/mmc/host/cavium.h

@@ -23,12 +23,15 @@
 #define CAVIUM_MAX_MMC		4
 
 /* DMA register addresses */
-#define MIO_EMM_DMA_CFG(x)	(0x00 + x->reg_off_dma)
-#define MIO_EMM_DMA_ADR(x)	(0x08 + x->reg_off_dma)
-#define MIO_EMM_DMA_INT(x)	(0x10 + x->reg_off_dma)
-#define MIO_EMM_DMA_INT_W1S(x)	(0x18 + x->reg_off_dma)
-#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x20 + x->reg_off_dma)
-#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x28 + x->reg_off_dma)
+#define MIO_EMM_DMA_FIFO_CFG(x)	(0x00 + x->reg_off_dma)
+#define MIO_EMM_DMA_FIFO_ADR(x)	(0x10 + x->reg_off_dma)
+#define MIO_EMM_DMA_FIFO_CMD(x)	(0x18 + x->reg_off_dma)
+#define MIO_EMM_DMA_CFG(x)	(0x20 + x->reg_off_dma)
+#define MIO_EMM_DMA_ADR(x)	(0x28 + x->reg_off_dma)
+#define MIO_EMM_DMA_INT(x)	(0x30 + x->reg_off_dma)
+#define MIO_EMM_DMA_INT_W1S(x)	(0x38 + x->reg_off_dma)
+#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma)
+#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma)
 
 /* register addresses */
 #define MIO_EMM_CFG(x)		(0x00 + x->reg_off)
@@ -64,6 +67,7 @@ struct cvm_mmc_host {
 	struct mmc_request *current_req;
 	struct sg_mapping_iter smi;
 	bool dma_active;
+	bool use_sg;
 
 	bool has_ciu3;
 	bool big_dma_addr;
@@ -113,6 +117,18 @@ struct cvm_mmc_cr_mods {
 };
 
 /* Bitfield definitions */
+#define MIO_EMM_DMA_FIFO_CFG_CLR	BIT_ULL(16)
+#define MIO_EMM_DMA_FIFO_CFG_INT_LVL	GENMASK_ULL(12, 8)
+#define MIO_EMM_DMA_FIFO_CFG_COUNT	GENMASK_ULL(4, 0)
+
+#define MIO_EMM_DMA_FIFO_CMD_RW		BIT_ULL(62)
+#define MIO_EMM_DMA_FIFO_CMD_INTDIS	BIT_ULL(60)
+#define MIO_EMM_DMA_FIFO_CMD_SWAP32	BIT_ULL(59)
+#define MIO_EMM_DMA_FIFO_CMD_SWAP16	BIT_ULL(58)
+#define MIO_EMM_DMA_FIFO_CMD_SWAP8	BIT_ULL(57)
+#define MIO_EMM_DMA_FIFO_CMD_ENDIAN	BIT_ULL(56)
+#define MIO_EMM_DMA_FIFO_CMD_SIZE	GENMASK_ULL(55, 36)
+
 #define MIO_EMM_CMD_SKIP_BUSY		BIT_ULL(62)
 #define MIO_EMM_CMD_BUS_ID		GENMASK_ULL(61, 60)
 #define MIO_EMM_CMD_VAL			BIT_ULL(59)