Commit b00ed48b authored by Linus Torvalds

Merge tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "Nothing special, this includes a couple of new device support and new
  driver support and bunch of driver updates.

  New support:

   - Tegra gpcdma driver support

   - Qualcomm SM8350, SM8450 and SC7280 device support

   - Renesas RZN1 dma and platform support

  Updates:

   - stm32 device pause/resume support and updates

   - DMA memset ops Documentation and usage clarification

   - deprecate '#dma-channels' & '#dma-requests' bindings

   - driver updates for stm32, ptdma, idxd, etc"

* tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (87 commits)
  dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled
  dmaengine: sun6i: Add support for the D1 variant
  dmaengine: sun6i: Add support for 34-bit physical addresses
  dmaengine: sun6i: Do not use virt_to_phys
  dt-bindings: dma: sun50i-a64: Add compatible for D1
  dmaengine: tegra: Remove unused switch case
  dmaengine: tegra: Fix uninitialized variable usage
  dmaengine: stm32-dma: add device_pause/device_resume support
  dmaengine: stm32-dma: rename pm ops before dma pause/resume introduction
  dmaengine: stm32-dma: pass DMA_SxSCR value to stm32_dma_handle_chan_done()
  dmaengine: stm32-dma: introduce stm32_dma_sg_inc to manage chan->next_sg
  dmaengine: stm32-dmamux: avoid reset of dmamux if used by coprocessor
  dmaengine: qcom: gpi: Add support for sc7280
  dt-bindings: dma: pl330: Add power-domains
  dmaengine: stm32-mdma: use dev_dbg on non-busy channel spurious it
  dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
  dmaengine: stm32-mdma: remove GISR1 register
  dmaengine: ti: deprecate '#dma-channels'
  dmaengine: mmp: deprecate '#dma-channels'
  dmaengine: pxa: deprecate '#dma-channels' and '#dma-requests'
  ...
@@ -39,6 +39,17 @@ properties:
   '#power-domain-cells':
     const: 0
 
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 1
+
+patternProperties:
+  "^dma-router@[a-f0-9]+$":
+    type: object
+    $ref: "../dma/renesas,rzn1-dmamux.yaml#"
+
 required:
   - compatible
   - reg
...
@@ -20,9 +20,11 @@ properties:
   compatible:
     oneOf:
-      - const: allwinner,sun50i-a64-dma
-      - const: allwinner,sun50i-a100-dma
-      - const: allwinner,sun50i-h6-dma
+      - enum:
+          - allwinner,sun20i-d1-dma
+          - allwinner,sun50i-a64-dma
+          - allwinner,sun50i-a100-dma
+          - allwinner,sun50i-h6-dma
       - items:
           - const: allwinner,sun8i-r40-dma
           - const: allwinner,sun50i-a64-dma
@@ -58,6 +60,7 @@ if:
   properties:
     compatible:
       enum:
+        - allwinner,sun20i-d1-dma
        - allwinner,sun50i-a100-dma
        - allwinner,sun50i-h6-dma
...
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Altera mSGDMA IP core
 
 maintainers:
-  - Olivier Dautricourt <olivier.dautricourt@orolia.com>
+  - Olivier Dautricourt <olivierdautricourt@gmail.com>
 
 description: |
   Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA)
...
@@ -55,6 +55,9 @@ properties:
   dma-coherent: true
 
+  power-domains:
+    maxItems: 1
+
   resets:
     minItems: 1
     maxItems: 2
...
@@ -10,10 +10,12 @@ Required properties:
 	  or one irq for pdma device
 
 Optional properties:
-- #dma-channels: Number of DMA channels supported by the controller (defaults
+- dma-channels: Number of DMA channels supported by the controller (defaults
   to 32 when not specified)
-- #dma-requests: Number of DMA requestor lines supported by the controller
+- #dma-channels: deprecated
+- dma-requests: Number of DMA requestor lines supported by the controller
   (defaults to 32 when not specified)
+- #dma-requests: deprecated
 
 "marvell,pdma-1.0"
 Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688.
@@ -33,7 +35,7 @@ pdma: dma-controller@d4000000 {
 	reg = <0xd4000000 0x10000>;
 	interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>;
 	interrupt-parent = <&intcmux32>;
-	#dma-channels = <16>;
+	dma-channels = <16>;
 };
 
 /*
@@ -45,7 +47,7 @@ pdma: dma-controller@d4000000 {
 	compatible = "marvell,pdma-1.0";
 	reg = <0xd4000000 0x10000>;
 	interrupts = <47>;
-	#dma-channels = <16>;
+	dma-channels = <16>;
 };
...
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/nvidia,tegra186-gpc-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NVIDIA Tegra GPC DMA Controller Device Tree Bindings
description: |
The Tegra General Purpose Central (GPC) DMA controller is used for faster
data transfers between memory to memory, memory to device and device to
memory.
maintainers:
- Jon Hunter <jonathanh@nvidia.com>
- Rajesh Gumasta <rgumasta@nvidia.com>
allOf:
- $ref: "dma-controller.yaml#"
properties:
compatible:
oneOf:
- const: nvidia,tegra186-gpcdma
- items:
- const: nvidia,tegra194-gpcdma
- const: nvidia,tegra186-gpcdma
"#dma-cells":
const: 1
reg:
maxItems: 1
interrupts:
description:
Should contain all of the per-channel DMA interrupts in
ascending order with respect to the DMA channel index.
minItems: 1
maxItems: 31
resets:
maxItems: 1
reset-names:
const: gpcdma
iommus:
maxItems: 1
dma-coherent: true
required:
- compatible
- reg
- interrupts
- resets
- reset-names
- "#dma-cells"
- iommus
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/memory/tegra186-mc.h>
#include <dt-bindings/reset/tegra186-reset.h>
dma-controller@2600000 {
compatible = "nvidia,tegra186-gpcdma";
reg = <0x2600000 0x210000>;
resets = <&bpmp TEGRA186_RESET_GPCDMA>;
reset-names = "gpcdma";
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
iommus = <&smmu TEGRA186_SID_GPCDMA_0>;
dma-coherent;
};
...
@@ -19,9 +19,12 @@ allOf:
 properties:
   compatible:
     enum:
+      - qcom,sc7280-gpi-dma
       - qcom,sdm845-gpi-dma
       - qcom,sm8150-gpi-dma
       - qcom,sm8250-gpi-dma
+      - qcom,sm8350-gpi-dma
+      - qcom,sm8450-gpi-dma
 
   reg:
     maxItems: 1
...
@@ -42,11 +42,10 @@ properties:
       - const: renesas,rcar-dmac
 
     - items:
-        - const: renesas,dmac-r8a779a0 # R-Car V3U
-
-    - items:
-        - const: renesas,dmac-r8a779f0 # R-Car S4-8
-        - const: renesas,rcar-gen4-dmac
+        - enum:
+            - renesas,dmac-r8a779a0     # R-Car V3U
+            - renesas,dmac-r8a779f0     # R-Car S4-8
+        - const: renesas,rcar-gen4-dmac # R-Car Gen4
 
   reg: true
@@ -121,7 +120,6 @@ if:
   compatible:
     contains:
       enum:
-        - renesas,dmac-r8a779a0
         - renesas,rcar-gen4-dmac
 then:
   properties:
...
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/renesas,rzn1-dmamux.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas RZ/N1 DMA mux
maintainers:
- Miquel Raynal <miquel.raynal@bootlin.com>
allOf:
- $ref: "dma-router.yaml#"
properties:
compatible:
const: renesas,rzn1-dmamux
reg:
maxItems: 1
description: DMA mux first register offset within the system control parent.
'#dma-cells':
const: 6
description:
The first four cells are dedicated to the master DMA controller. The fifth
cell gives the DMA mux bit index that must be set starting from 0. The
sixth cell gives the binary value that must be written there, ie. 0 or 1.
dma-masters:
minItems: 1
maxItems: 2
dma-requests:
const: 32
required:
- reg
- dma-requests
additionalProperties: false
examples:
- |
dma-router@a0 {
compatible = "renesas,rzn1-dmamux";
reg = <0xa0 4>;
#dma-cells = <6>;
dma-masters = <&dma0 &dma1>;
dma-requests = <32>;
};
@@ -28,7 +28,15 @@ allOf:
 properties:
   compatible:
     items:
-      - const: sifive,fu540-c000-pdma
+      - enum:
+          - sifive,fu540-c000-pdma
+      - const: sifive,pdma0
+    description:
+      Should be "sifive,<chip>-pdma" and "sifive,pdma<version>".
+      Supported compatible strings are -
+      "sifive,fu540-c000-pdma" for the SiFive PDMA v0 as integrated onto the
+      SiFive FU540 chip resp and "sifive,pdma0" for the SiFive PDMA v0 IP block
+      with no chip integration tweaks.
 
   reg:
     maxItems: 1
@@ -37,6 +45,12 @@ properties:
     minItems: 1
     maxItems: 8
 
+  dma-channels:
+    description: For backwards-compatibility, the default value is 4
+    minimum: 1
+    maximum: 4
+    default: 4
+
   '#dma-cells':
     const: 1
@@ -50,8 +64,9 @@ unevaluatedProperties: false
 examples:
   - |
     dma-controller@3000000 {
-      compatible = "sifive,fu540-c000-pdma";
+      compatible = "sifive,fu540-c000-pdma", "sifive,pdma0";
       reg = <0x3000000 0x8000>;
+      dma-channels = <4>;
       interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>;
       #dma-cells = <1>;
     };
...
@@ -15,7 +15,13 @@ allOf:
 properties:
   compatible:
-    const: snps,dma-spear1340
+    oneOf:
+      - const: snps,dma-spear1340
+      - items:
+          - enum:
+              - renesas,r9a06g032-dma
+          - const: renesas,rzn1-dma
 
   "#dma-cells":
     minimum: 3
...
@@ -8,10 +8,13 @@ Required properties:
 - interrupts: Should contain one interrupt shared by all channel.
 - #dma-cells: must be <1>. Used to represent the number of integer
 	cells in the dmas property of client device.
-- #dma-channels : Number of DMA channels supported. Should be 32.
+- dma-channels : Number of DMA channels supported. Should be 32.
 - clock-names: Should contain the clock of the DMA controller.
 - clocks: Should contain a clock specifier for each entry in clock-names.
 
+Deprecated properties:
+- #dma-channels : Number of DMA channels supported. Should be 32.
+
 Example:
 
 Controller:
@@ -20,7 +23,7 @@ apdma: dma-controller@20100000 {
 	reg = <0x20100000 0x4000>;
 	interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 	#dma-cells = <1>;
-	#dma-channels = <32>;
+	dma-channels = <32>;
 	clock-names = "enable";
 	clocks = <&clk_ap_ahb_gates 5>;
 };
...
@@ -110,7 +110,11 @@ axi_vdma_0: axivdma@40030000 {
 Required properties:
 - dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
 	where Channel ID is '0' for write/tx and '1' for read/rx
-	channel.
+	channel. For MCMDA, MM2S channel(write/tx) ID start from
+	'0' and is in [0-15] range. S2MM channel(read/rx) ID start
+	from '16' and is in [16-31] range. These channels ID are
+	fixed irrespective of IP configuration.
 - dma-names: a list of DMA channel names, one per "dmas" entry
 
 Example:
...
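As a quick illustration of the fixed MCDMA channel numbering described in the binding text above, a hedged C sketch follows; the helper name is hypothetical and simply restates the documented ID ranges (real consumers just encode the ID in their device tree dmas cell).

#include <linux/types.h>

/*
 * Hypothetical helper, for illustration only: MCDMA MM2S (write/tx)
 * channels use IDs 0-15 and S2MM (read/rx) channels use IDs 16-31,
 * irrespective of the IP configuration.
 */
static inline unsigned int xilinx_mcdma_chan_id(bool s2mm, unsigned int idx)
{
	return (s2mm ? 16 : 0) + idx;
}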
@@ -206,6 +206,12 @@ Currently, the types available are:
     - The device is able to perform parity check using RAID6 P+Q
       algorithm against a memory buffer.
 
+- DMA_MEMSET
+
+  - The device is able to fill memory with the provided pattern
+
+  - The pattern is treated as a single byte signed value.
+
 - DMA_INTERRUPT
 
   - The device is able to trigger a dummy transfer that will
@@ -457,7 +463,7 @@ supported.
   - Should use dma_set_residue to report it
 
   - In the case of a cyclic transfer, it should only take into
-    account the current period.
+    account the total size of the cyclic buffer.
 
   - Should return DMA_OUT_OF_ORDER if the device does not support in order
     completion and is completing the operation out of order.
...
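Since the provider documentation above clarifies that only the first byte of the DMA_MEMSET value is meaningful, here is a minimal C sketch of the byte replication a controller driver typically performs, mirroring the at_hdmac/at_xdmac fixes later in this pull; the helper name is illustrative and not part of the dmaengine API.

#include <linux/types.h>

/*
 * Illustrative helper only (not a dmaengine API): replicate the single
 * significant byte of a DMA_MEMSET value across a 32-bit fill word, the
 * way the at_hdmac/at_xdmac drivers do in this series.
 */
static inline u32 dma_memset_fill_word(int value)
{
	u8 pattern = value & 0xff;	/* only the first byte is used */

	return ((u32)pattern << 24) | ((u32)pattern << 16) |
	       ((u32)pattern << 8) | pattern;
}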
@@ -820,7 +820,7 @@ S:	Maintained
 F:	drivers/mailbox/mailbox-altera.c
 
 ALTERA MSGDMA IP CORE DRIVER
-M:	Olivier Dautricourt <olivier.dautricourt@orolia.com>
+M:	Olivier Dautricourt <olivierdautricourt@gmail.com>
 R:	Stefan Roese <sr@denx.de>
 L:	dmaengine@vger.kernel.org
 S:	Odd Fixes
@@ -19202,6 +19202,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <vireshk@kernel.org>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
+F:	Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
 F:	Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
 F:	drivers/dma/dw/
 F:	include/dt-bindings/dma/dw-dmac.h
...
...@@ -16,13 +16,17 @@ ...@@ -16,13 +16,17 @@
#include <linux/math64.h> #include <linux/math64.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_clock.h> #include <linux/pm_clock.h>
#include <linux/pm_domain.h> #include <linux/pm_domain.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h> #include <dt-bindings/clock/r9a06g032-sysctrl.h>
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
struct r9a06g032_gate { struct r9a06g032_gate {
u16 gate, reset, ready, midle, u16 gate, reset, ready, midle,
scon, mirack, mistat; scon, mirack, mistat;
...@@ -315,6 +319,30 @@ struct r9a06g032_priv { ...@@ -315,6 +319,30 @@ struct r9a06g032_priv {
void __iomem *reg; void __iomem *reg;
}; };
static struct r9a06g032_priv *sysctrl_priv;
/* Exported helper to access the DMAMUX register */
int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val)
{
unsigned long flags;
u32 dmamux;
if (!sysctrl_priv)
return -EPROBE_DEFER;
spin_lock_irqsave(&sysctrl_priv->lock, flags);
dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
dmamux &= ~mask;
dmamux |= val & mask;
writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX);
spin_unlock_irqrestore(&sysctrl_priv->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux);
/* register/bit pairs are encoded as an uint16_t */ /* register/bit pairs are encoded as an uint16_t */
static void static void
clk_rdesc_set(struct r9a06g032_priv *clocks, clk_rdesc_set(struct r9a06g032_priv *clocks,
...@@ -963,7 +991,17 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev) ...@@ -963,7 +991,17 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
if (error) if (error)
return error; return error;
return r9a06g032_add_clk_domain(dev); error = r9a06g032_add_clk_domain(dev);
if (error)
return error;
sysctrl_priv = clocks;
error = of_platform_populate(np, NULL, NULL, dev);
if (error)
dev_err(dev, "Failed to populate children (%d)\n", error);
return 0;
} }
static const struct of_device_id r9a06g032_match[] = { static const struct of_device_id r9a06g032_match[] = {
......
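The r9a06g032_sysctrl_set_dmamux() helper exported above is consumed by the new RZ/N1 DMA router driver added later in this series. As a rough sketch of the calling convention, assuming only what the code above shows (mask selects the DMAMUX routing bit, val supplies its new state; the function name below is made up for illustration):

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>

/* Sketch of a caller: set or clear a single DMAMUX routing bit. */
static int example_set_dmamux_route(unsigned int req_idx, bool set)
{
	u32 mask = BIT(req_idx);

	/* Returns -EPROBE_DEFER until the sysctrl driver has probed. */
	return r9a06g032_sysctrl_set_dmamux(mask, set ? mask : 0);
}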
@@ -163,7 +163,7 @@ config DMA_SUN4I
 config DMA_SUN6I
 	tristate "Allwinner A31 SoCs DMA support"
-	depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+	depends on ARCH_SUNXI || COMPILE_TEST
 	depends on RESET_CONTROLLER
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
@@ -629,6 +629,18 @@ config TXX9_DMAC
 	  Support the TXx9 SoC internal DMA controller.  This can be
 	  integrated in chips such as the Toshiba TX4927/38/39.
 
+config TEGRA186_GPC_DMA
+	tristate "NVIDIA Tegra GPC DMA support"
+	depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
+	depends on IOMMU_API
+	select DMA_ENGINE
+	help
+	  Support for the NVIDIA Tegra General Purpose Central DMA controller.
+	  The DMA controller has multiple DMA channels which can be configured
+	  for different peripherals like UART, SPI, etc which are on APB bus.
+	  This DMA controller transfers data from memory to peripheral FIFO
+	  or vice versa. It also supports memory to memory data transfer.
+
 config TEGRA20_APB_DMA
 	tristate "NVIDIA Tegra20 APB DMA support"
 	depends on ARCH_TEGRA || COMPILE_TEST
...
@@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
 obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
 obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
...
...@@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan) ...@@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan)); vchan_free_chan_resources(to_virt_chan(chan));
} }
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
struct dma_chan *chan, unsigned long flags)
{
struct dma_async_tx_descriptor *retval = NULL;
return retval;
}
/* /*
* Code accessing dma_async_is_complete() in a tight loop may give problems. * Code accessing dma_async_is_complete() in a tight loop may give problems.
* If slaves are relying on interrupts to signal completion this function * If slaves are relying on interrupts to signal completion this function
...@@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.dev = &adev->dev; pl08x->memcpy.dev = &adev->dev;
pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
pl08x->memcpy.device_issue_pending = pl08x_issue_pending; pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
pl08x->memcpy.device_config = pl08x_config; pl08x->memcpy.device_config = pl08x_config;
...@@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.dev = &adev->dev; pl08x->slave.dev = &adev->dev;
pl08x->slave.device_free_chan_resources = pl08x->slave.device_free_chan_resources =
pl08x_free_chan_resources; pl08x_free_chan_resources;
pl08x->slave.device_prep_dma_interrupt =
pl08x_prep_dma_interrupt;
pl08x->slave.device_tx_status = pl08x_dma_tx_status; pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
......
...@@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, ...@@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct at_desc *desc; struct at_desc *desc;
void __iomem *vaddr; void __iomem *vaddr;
dma_addr_t paddr; dma_addr_t paddr;
char fill_pattern;
dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
&dest, value, len, flags); &dest, value, len, flags);
...@@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, ...@@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
__func__); __func__);
return NULL; return NULL;
} }
*(u32*)vaddr = value;
/* Only the first byte of value is to be used according to dmaengine */
fill_pattern = (char)value;
*(u32*)vaddr = (fill_pattern << 24) |
(fill_pattern << 16) |
(fill_pattern << 8) |
fill_pattern;
desc = atc_create_memset_desc(chan, paddr, dest, len); desc = atc_create_memset_desc(chan, paddr, dest, len);
if (!desc) { if (!desc) {
......
...@@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, ...@@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
unsigned long flags; unsigned long flags;
size_t ublen; size_t ublen;
u32 dwidth; u32 dwidth;
char pattern;
/* /*
* WARNING: The channel configuration is set here since there is no * WARNING: The channel configuration is set here since there is no
* dmaengine_slave_config call in this case. Moreover we don't know the * dmaengine_slave_config call in this case. Moreover we don't know the
...@@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, ...@@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
/* Only the first byte of value is to be used according to dmaengine */
pattern = (char)value;
ublen = len >> dwidth; ublen = len >> dwidth;
desc->lld.mbr_da = dst_addr; desc->lld.mbr_da = dst_addr;
desc->lld.mbr_ds = value; desc->lld.mbr_ds = (pattern << 24) |
(pattern << 16) |
(pattern << 8) |
pattern;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
| AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NDEN
| AT_XDMAC_MBR_UBC_NSEN | AT_XDMAC_MBR_UBC_NSEN
......
@@ -17,7 +17,9 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/irq.h>
...
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 
 	dd = &jzdma->dma_device;
 
+	/*
+	 * The real segment size limit is dependent on the size unit selected
+	 * for the transfer. Because the size unit is selected automatically
+	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
+	 */
+	dma_set_max_seg_size(dev, 0xffffff);
+
 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
...
@@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	 * When the chan_id is a negative value, we are dynamically adding
 	 * the channel. Otherwise we are static enumerating.
 	 */
-	mutex_lock(&device->chan_mutex);
 	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
-	mutex_unlock(&device->chan_mutex);
 	if (chan->chan_id < 0) {
 		pr_err("%s: unable to alloc ida for chan: %d\n",
 		       __func__, chan->chan_id);
@@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	return 0;
 
  err_out_ida:
-	mutex_lock(&device->chan_mutex);
 	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
  err_free_dev:
 	kfree(chan->dev);
  err_free_local:
@@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
-	mutex_lock(&device->chan_mutex);
 	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
 	device_unregister(&chan->dev->device);
 	free_percpu(chan->local);
 }
@@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
-	mutex_init(&device->chan_mutex);
 	ida_init(&device->chan_ida);
 
 	/* represent channels in sysfs. Probably want devs too */
...
@@ -675,10 +675,16 @@ static int dmatest_func(void *data)
 	/*
 	 * src and dst buffers are freed by ourselves below
 	 */
-	if (params->polled)
+	if (params->polled) {
 		flags = DMA_CTRL_ACK;
-	else
-		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	} else {
+		if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
+			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+		} else {
+			pr_err("Channel does not support interrupt!\n");
+			goto err_pq_array;
+		}
+	}
 
 	ktime = ktime_get();
 	while (!(kthread_should_stop() ||
@@ -906,6 +912,7 @@ static int dmatest_func(void *data)
 	runtime = ktime_to_us(ktime);
 
 	ret = 0;
+err_pq_array:
 	kfree(dma_pq);
 err_srcs_array:
 	kfree(srcs);
...
@@ -16,6 +16,15 @@ config DW_DMAC
 	  Support the Synopsys DesignWare AHB DMA controller. This
 	  can be integrated in chips such as the Intel Cherrytrail.
 
+config RZN1_DMAMUX
+	tristate "Renesas RZ/N1 DMAMUX driver"
+	depends on DW_DMAC
+	depends on ARCH_RZN1 || COMPILE_TEST
+	help
+	  Support the Renesas RZ/N1 DMAMUX which is located in front of
+	  the Synopsys DesignWare AHB DMA controller located on Renesas
+	  SoCs.
+
 config DW_DMAC_PCI
 	tristate "Synopsys DesignWare AHB DMA PCI driver"
 	depends on PCI
...
@@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF)		+= of.o
 obj-$(CONFIG_DW_DMAC_PCI)	+= dw_dmac_pci.o
 dw_dmac_pci-y			:= pci.o
+
+obj-$(CONFIG_RZN1_DMAMUX)	+= rzn1-dmamux.o
@@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev)
 #ifdef CONFIG_OF
 static const struct of_device_id dw_dma_of_id_table[] = {
 	{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
+	{ .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Schneider-Electric
* Author: Miquel Raynal <miquel.raynal@bootlin.com
* Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
#include <linux/bitops.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/types.h>
#define RNZ1_DMAMUX_NCELLS 6
#define RZN1_DMAMUX_MAX_LINES 64
#define RZN1_DMAMUX_LINES_PER_CTLR 16
struct rzn1_dmamux_data {
struct dma_router dmarouter;
DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR);
};
struct rzn1_dmamux_map {
unsigned int req_idx;
};
static void rzn1_dmamux_free(struct device *dev, void *route_data)
{
struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev);
struct rzn1_dmamux_map *map = route_data;
dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx);
clear_bit(map->req_idx, dmamux->used_chans);
kfree(map);
}
static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev);
struct rzn1_dmamux_map *map;
unsigned int dmac_idx, chan, val;
u32 mask;
int ret;
if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
return ERR_PTR(-EINVAL);
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return ERR_PTR(-ENOMEM);
chan = dma_spec->args[0];
map->req_idx = dma_spec->args[4];
val = dma_spec->args[5];
dma_spec->args_count -= 2;
if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) {
dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan);
ret = -EINVAL;
goto free_map;
}
if (map->req_idx >= RZN1_DMAMUX_MAX_LINES ||
(map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) {
dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx);
ret = -EINVAL;
goto free_map;
}
dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0;
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
ret = -EINVAL;
goto free_map;
}
dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n",
map->req_idx, dmac_idx, chan);
if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
ret = -EBUSY;
goto free_map;
}
mask = BIT(map->req_idx);
ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0);
if (ret)
goto clear_bitmap;
return map;
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
free_map:
kfree(map);
return ERR_PTR(ret);
}
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
struct device_node *mux_node = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *dmac_node;
struct rzn1_dmamux_data *dmamux;
dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
if (!dmamux)
return -ENOMEM;
dmac_node = of_parse_phandle(mux_node, "dma-masters", 0);
if (!dmac_node)
return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n");
match = of_match_node(rzn1_dmac_match, dmac_node);
of_node_put(dmac_node);
if (!match)
return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n");
dmamux->dmarouter.dev = &pdev->dev;
dmamux->dmarouter.route_free = rzn1_dmamux_free;
platform_set_drvdata(pdev, dmamux);
return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate,
&dmamux->dmarouter);
}
static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
.name = "renesas,rzn1-dmamux",
.of_match_table = rzn1_dmamux_match,
},
.probe = rzn1_dmamux_probe,
};
module_platform_driver(rzn1_dmamux_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com");
MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
@@ -132,7 +132,7 @@ struct ep93xx_dma_desc {
 /**
  * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
  * @chan: dmaengine API channel
- * @edma: pointer to to the engine device
+ * @edma: pointer to the engine device
  * @regs: memory mapped registers
  * @irq: interrupt number of the channel
  * @clk: clock used by this channel
...
...@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) ...@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq; ctx->wq = wq;
filp->private_data = ctx; filp->private_data = ctx;
if (device_pasid_enabled(idxd)) { if (device_user_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm, NULL); sva = iommu_sva_bind_device(dev, current->mm, NULL);
if (IS_ERR(sva)) { if (IS_ERR(sva)) {
rc = PTR_ERR(sva); rc = PTR_ERR(sva);
...@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) ...@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
if (wq_shared(wq)) { if (wq_shared(wq)) {
idxd_device_drain_pasid(idxd, ctx->pasid); idxd_device_drain_pasid(idxd, ctx->pasid);
} else { } else {
if (device_pasid_enabled(idxd)) { if (device_user_pasid_enabled(idxd)) {
/* The wq disable in the disable pasid function will drain the wq */ /* The wq disable in the disable pasid function will drain the wq */
rc = idxd_wq_disable_pasid(wq); rc = idxd_wq_disable_pasid(wq);
if (rc < 0) if (rc < 0)
...@@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) ...@@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock); mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_USER; wq->type = IDXD_WQT_USER;
rc = __drv_enable_wq(wq); rc = drv_enable_wq(wq);
if (rc < 0) if (rc < 0)
goto err; goto err;
...@@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) ...@@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
return 0; return 0;
err_cdev: err_cdev:
__drv_disable_wq(wq); drv_disable_wq(wq);
err: err:
wq->type = IDXD_WQT_NONE; wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock); mutex_unlock(&wq->wq_lock);
...@@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) ...@@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock); mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq); idxd_wq_del_cdev(wq);
__drv_disable_wq(wq); drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE; wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock); mutex_unlock(&wq->wq_lock);
} }
...@@ -369,10 +369,16 @@ int idxd_cdev_register(void) ...@@ -369,10 +369,16 @@ int idxd_cdev_register(void)
rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
ictx[i].name); ictx[i].name);
if (rc) if (rc)
return rc; goto err_free_chrdev_region;
} }
return 0; return 0;
err_free_chrdev_region:
for (i--; i >= 0; i--)
unregister_chrdev_region(ictx[i].devt, MINORMASK);
return rc;
} }
void idxd_cdev_remove(void) void idxd_cdev_remove(void)
......
...@@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq) ...@@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
if (wq->state == IDXD_WQ_ENABLED) { if (wq->state == IDXD_WQ_ENABLED) {
dev_dbg(dev, "WQ %d already enabled\n", wq->id); dev_dbg(dev, "WQ %d already enabled\n", wq->id);
return -ENXIO; return 0;
} }
idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
...@@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd) ...@@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
} }
} }
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{ {
struct idxd_device *idxd = wq->idxd; struct idxd_device *idxd = wq->idxd;
int rc;
union wqcfg wqcfg; union wqcfg wqcfg;
unsigned int offset; unsigned int offset;
rc = idxd_wq_disable(wq, false); offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
if (rc < 0) spin_lock(&idxd->dev_lock);
return rc; wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.priv = priv;
wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
spin_unlock(&idxd->dev_lock);
}
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
union wqcfg wqcfg;
unsigned int offset;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
spin_lock(&idxd->dev_lock); spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1; wqcfg.pasid_en = 1;
wqcfg.pasid = pasid; wqcfg.pasid = pasid;
wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
spin_unlock(&idxd->dev_lock); spin_unlock(&idxd->dev_lock);
}
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
int rc;
rc = idxd_wq_disable(wq, false);
if (rc < 0)
return rc;
__idxd_wq_set_pasid_locked(wq, pasid);
rc = idxd_wq_enable(wq); rc = idxd_wq_enable(wq);
if (rc < 0) if (rc < 0)
...@@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd) ...@@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO; return -ENXIO;
} }
spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd); idxd_device_clear_state(idxd);
idxd->state = IDXD_DEV_DISABLED;
spin_unlock(&idxd->dev_lock);
return 0; return 0;
} }
void idxd_device_reset(struct idxd_device *idxd) void idxd_device_reset(struct idxd_device *idxd)
{ {
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd); idxd_device_clear_state(idxd);
idxd->state = IDXD_DEV_DISABLED; spin_lock(&idxd->dev_lock);
idxd_unmask_error_interrupts(idxd); idxd_unmask_error_interrupts(idxd);
spin_unlock(&idxd->dev_lock); spin_unlock(&idxd->dev_lock);
} }
...@@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) ...@@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{ {
int i; int i;
lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) { for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i]; struct idxd_wq *wq = idxd->wqs[i];
mutex_lock(&wq->wq_lock);
if (wq->state == IDXD_WQ_ENABLED) { if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq); idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED; wq->state = IDXD_WQ_DISABLED;
} }
idxd_wq_device_reset_cleanup(wq); idxd_wq_device_reset_cleanup(wq);
mutex_unlock(&wq->wq_lock);
} }
} }
...@@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd) ...@@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd)
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return; return;
idxd_device_wqs_clear_state(idxd);
spin_lock(&idxd->dev_lock);
idxd_groups_clear_state(idxd); idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd); idxd_engines_clear_state(idxd);
idxd_device_wqs_clear_state(idxd); idxd->state = IDXD_DEV_DISABLED;
spin_unlock(&idxd->dev_lock);
} }
static void idxd_group_config_write(struct idxd_group *group) static void idxd_group_config_write(struct idxd_group *group)
...@@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq) ...@@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
*/ */
for (i = 0; i < WQCFG_STRIDES(idxd); i++) { for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i); wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
} }
if (wq->size == 0 && wq->type != IDXD_WQT_NONE) if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
...@@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq) ...@@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (wq_dedicated(wq)) if (wq_dedicated(wq))
wq->wqcfg->mode = 1; wq->wqcfg->mode = 1;
if (device_pasid_enabled(idxd)) {
wq->wqcfg->pasid_en = 1;
if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
wq->wqcfg->pasid = idxd->pasid;
}
/* /*
* Here the priv bit is set depending on the WQ type. priv = 1 if the * The WQ priv bit is set depending on the WQ type. priv = 1 if the
* WQ type is kernel to indicate privileged access. This setting only * WQ type is kernel to indicate privileged access. This setting only
* matters for dedicated WQ. According to the DSA spec: * matters for dedicated WQ. According to the DSA spec:
* If the WQ is in dedicated mode, WQ PASID Enable is 1, and the * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
...@@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq) ...@@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
* In the case of a dedicated kernel WQ that is not able to support * In the case of a dedicated kernel WQ that is not able to support
* the PASID cap, then the configuration will be rejected. * the PASID cap, then the configuration will be rejected.
*/ */
wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq) && wq->wqcfg->pasid_en && if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
!idxd_device_pasid_priv_enabled(idxd) && !idxd_device_pasid_priv_enabled(idxd) &&
wq->type == IDXD_WQT_KERNEL) { wq->type == IDXD_WQT_KERNEL) {
...@@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd) ...@@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->group) if (!wq->group)
continue; continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) { if (wq_shared(wq) && !wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n"); dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL; return -EINVAL;
...@@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq) ...@@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
wq->priority = wq->wqcfg->priority; wq->priority = wq->wqcfg->priority;
wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
for (i = 0; i < WQCFG_STRIDES(idxd); i++) { for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]); dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
...@@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq) ...@@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd; struct idxd_device *idxd = wq->idxd;
struct idxd_irq_entry *ie = &wq->ie; struct idxd_irq_entry *ie = &wq->ie;
synchronize_irq(ie->vector); if (wq->type != IDXD_WQT_KERNEL)
return;
free_irq(ie->vector, ie); free_irq(ie->vector, ie);
idxd_flush_pending_descs(ie); idxd_flush_pending_descs(ie);
if (idxd->request_int_handles) if (idxd->request_int_handles)
...@@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq) ...@@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
struct idxd_irq_entry *ie; struct idxd_irq_entry *ie;
int rc; int rc;
if (wq->type != IDXD_WQT_KERNEL)
return 0;
ie = &wq->ie; ie = &wq->ie;
ie->vector = pci_irq_vector(pdev, ie->id); ie->vector = pci_irq_vector(pdev, ie->id);
ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID; ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
...@@ -1211,7 +1234,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq) ...@@ -1211,7 +1234,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
return rc; return rc;
} }
int __drv_enable_wq(struct idxd_wq *wq) int drv_enable_wq(struct idxd_wq *wq)
{ {
struct idxd_device *idxd = wq->idxd; struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev; struct device *dev = &idxd->pdev->dev;
...@@ -1245,7 +1268,7 @@ int __drv_enable_wq(struct idxd_wq *wq) ...@@ -1245,7 +1268,7 @@ int __drv_enable_wq(struct idxd_wq *wq)
/* Shared WQ checks */ /* Shared WQ checks */
if (wq_shared(wq)) { if (wq_shared(wq)) {
if (!device_swq_supported(idxd)) { if (!wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
dev_dbg(dev, "PASID not enabled and shared wq.\n"); dev_dbg(dev, "PASID not enabled and shared wq.\n");
goto err; goto err;
...@@ -1265,6 +1288,29 @@ int __drv_enable_wq(struct idxd_wq *wq) ...@@ -1265,6 +1288,29 @@ int __drv_enable_wq(struct idxd_wq *wq)
} }
} }
/*
* In the event that the WQ is configurable for pasid and priv bits.
* For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
* However, for non-kernel wq, the driver should only set the pasid_en bit for
* shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
* pasid_en later on so there is no need to setup.
*/
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
int priv = 0;
if (wq_pasid_enabled(wq)) {
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
__idxd_wq_set_pasid_locked(wq, pasid);
}
}
if (is_idxd_wq_kernel(wq))
priv = 1;
__idxd_wq_set_priv_locked(wq, priv);
}
rc = 0; rc = 0;
spin_lock(&idxd->dev_lock); spin_lock(&idxd->dev_lock);
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
...@@ -1289,8 +1335,36 @@ int __drv_enable_wq(struct idxd_wq *wq) ...@@ -1289,8 +1335,36 @@ int __drv_enable_wq(struct idxd_wq *wq)
} }
wq->client_count = 0; wq->client_count = 0;
rc = idxd_wq_request_irq(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
goto err_irq;
}
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
dev_dbg(dev, "WQ resource alloc failed\n");
goto err_res_alloc;
}
rc = idxd_wq_init_percpu_ref(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
dev_dbg(dev, "percpu_ref setup failed\n");
goto err_ref;
}
return 0; return 0;
err_ref:
idxd_wq_free_resources(wq);
err_res_alloc:
idxd_wq_free_irq(wq);
err_irq:
idxd_wq_unmap_portal(wq);
err_map_portal: err_map_portal:
rc = idxd_wq_disable(wq, false); rc = idxd_wq_disable(wq, false);
if (rc < 0) if (rc < 0)
...@@ -1299,17 +1373,7 @@ int __drv_enable_wq(struct idxd_wq *wq) ...@@ -1299,17 +1373,7 @@ int __drv_enable_wq(struct idxd_wq *wq)
return rc; return rc;
} }
int drv_enable_wq(struct idxd_wq *wq) void drv_disable_wq(struct idxd_wq *wq)
{
int rc;
mutex_lock(&wq->wq_lock);
rc = __drv_enable_wq(wq);
mutex_unlock(&wq->wq_lock);
return rc;
}
void __drv_disable_wq(struct idxd_wq *wq)
{ {
struct idxd_device *idxd = wq->idxd; struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev; struct device *dev = &idxd->pdev->dev;
...@@ -1320,21 +1384,16 @@ void __drv_disable_wq(struct idxd_wq *wq) ...@@ -1320,21 +1384,16 @@ void __drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n", dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq)); wq->id, idxd_wq_refcount(wq));
idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq); idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq); idxd_wq_drain(wq);
idxd_wq_free_irq(wq);
idxd_wq_reset(wq); idxd_wq_reset(wq);
percpu_ref_exit(&wq->wq_active);
wq->type = IDXD_WQT_NONE;
wq->client_count = 0; wq->client_count = 0;
} }
void drv_disable_wq(struct idxd_wq *wq)
{
mutex_lock(&wq->wq_lock);
__drv_disable_wq(wq);
mutex_unlock(&wq->wq_lock);
}
int idxd_device_drv_probe(struct idxd_dev *idxd_dev) int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{ {
struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
......
...@@ -87,6 +87,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq, ...@@ -87,6 +87,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->completion_addr = compl; hw->completion_addr = compl;
} }
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
struct idxd_wq *wq = to_idxd_wq(c);
u32 desc_flags;
struct idxd_desc *desc;
if (wq->state != IDXD_WQ_ENABLED)
return NULL;
op_flag_setup(flags, &desc_flags);
desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
if (IS_ERR(desc))
return NULL;
idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
0, 0, 0, desc->compl_dma, desc_flags);
desc->txd.flags = flags;
return &desc->txd;
}
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags) dma_addr_t dma_src, size_t len, unsigned long flags)
...@@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd) ...@@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels); INIT_LIST_HEAD(&dma->channels);
dma->dev = dev; dma->dev = dev;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release; dma->device_release = idxd_dma_release;
dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
...@@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd) ...@@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd)
dma_async_device_unregister(&idxd->idxd_dma->dma); dma_async_device_unregister(&idxd->idxd_dma->dma);
} }
int idxd_register_dma_channel(struct idxd_wq *wq) static int idxd_register_dma_channel(struct idxd_wq *wq)
{ {
struct idxd_device *idxd = wq->idxd; struct idxd_device *idxd = wq->idxd;
struct dma_device *dma = &idxd->idxd_dma->dma; struct dma_device *dma = &idxd->idxd_dma->dma;
...@@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq) ...@@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
return 0; return 0;
} }
void idxd_unregister_dma_channel(struct idxd_wq *wq) static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{ {
struct idxd_dma_chan *idxd_chan = wq->idxd_chan; struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
struct dma_chan *chan = &idxd_chan->chan; struct dma_chan *chan = &idxd_chan->chan;
...@@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) ...@@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock); mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL; wq->type = IDXD_WQT_KERNEL;
rc = idxd_wq_request_irq(wq); rc = drv_enable_wq(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
goto err_irq;
}
rc = __drv_enable_wq(wq);
if (rc < 0) { if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO; rc = -ENXIO;
goto err; goto err;
} }
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
dev_dbg(dev, "WQ resource alloc failed\n");
goto err_res_alloc;
}
rc = idxd_wq_init_percpu_ref(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
dev_dbg(dev, "percpu_ref setup failed\n");
goto err_ref;
}
rc = idxd_register_dma_channel(wq); rc = idxd_register_dma_channel(wq);
if (rc < 0) { if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR; idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
...@@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) ...@@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0; return 0;
err_dma: err_dma:
__idxd_wq_quiesce(wq); drv_disable_wq(wq);
percpu_ref_exit(&wq->wq_active);
err_ref:
idxd_wq_free_resources(wq);
err_res_alloc:
__drv_disable_wq(wq);
err: err:
idxd_wq_free_irq(wq);
err_irq:
wq->type = IDXD_WQT_NONE; wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock); mutex_unlock(&wq->wq_lock);
return rc; return rc;
...@@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) ...@@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock); mutex_lock(&wq->wq_lock);
__idxd_wq_quiesce(wq); __idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq); idxd_unregister_dma_channel(wq);
idxd_wq_free_resources(wq); drv_disable_wq(wq);
__drv_disable_wq(wq);
percpu_ref_exit(&wq->wq_active);
idxd_wq_free_irq(wq);
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock); mutex_unlock(&wq->wq_lock);
} }
......
...@@ -239,6 +239,7 @@ enum idxd_device_flag { ...@@ -239,6 +239,7 @@ enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0, IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING, IDXD_FLAG_CMD_RUNNING,
IDXD_FLAG_PASID_ENABLED, IDXD_FLAG_PASID_ENABLED,
IDXD_FLAG_USER_PASID_ENABLED,
}; };
struct idxd_dma_dev { struct idxd_dma_dev {
...@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd) ...@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd)
return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} }
static inline bool device_swq_supported(struct idxd_device *idxd) static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{ {
return (support_enqcmd && device_pasid_enabled(idxd)); return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}
static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{
return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
(is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
}
static inline bool wq_shared_supported(struct idxd_wq *wq)
{
return (support_enqcmd && wq_pasid_enabled(wq));
} }
enum idxd_portal_prot { enum idxd_portal_prot {
...@@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void); ...@@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev); int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev); void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq); int drv_enable_wq(struct idxd_wq *wq);
int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq); void drv_disable_wq(struct idxd_wq *wq);
void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd);
...@@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); ...@@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */ /* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd); int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd); void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc, void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc); enum idxd_complete_type comp_type, bool free_desc);
......
...@@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd) ...@@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n"); dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}

		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		else
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

		if (idxd_enable_system_pasid(idxd))
			dev_warn(dev, "No in-kernel DMA with PASID.\n");
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) { } else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n"); dev_warn(dev, "User forced SVA off via module param.\n");
} }
...@@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd) ...@@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd)
err: err:
if (device_pasid_enabled(idxd)) if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd); idxd_disable_system_pasid(idxd);
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); if (device_user_pasid_enabled(idxd))
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc; return rc;
} }
...@@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd) ...@@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd); idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd)) if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd); idxd_disable_system_pasid(idxd);
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); if (device_user_pasid_enabled(idxd))
iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
} }
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
...@@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev) ...@@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev)
free_irq(irq_entry->vector, irq_entry); free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base); pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); if (device_user_pasid_enabled(idxd))
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_disable_device(pdev); pci_disable_device(pdev);
destroy_workqueue(idxd->wq); destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd); perfmon_pmu_remove(idxd);
......
...@@ -353,6 +353,7 @@ union wqcfg { ...@@ -353,6 +353,7 @@ union wqcfg {
} __packed; } __packed;
#define WQCFG_PASID_IDX 2 #define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
#define WQCFG_OCCUP_IDX 6 #define WQCFG_OCCUP_IDX 6
#define WQCFG_OCCUP_MASK 0xffff #define WQCFG_OCCUP_MASK 0xffff
......
...@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev, ...@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) { if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags); set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0; wq->threshold = 0;
} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) { } else if (sysfs_streq(buf, "shared")) {
clear_bit(WQ_FLAG_DEDICATED, &wq->flags); clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else { } else {
return -EINVAL; return -EINVAL;
...@@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev, ...@@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev,
size_t count) size_t count)
{ {
struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_wq *wq = confdev_to_wq(dev);
char *input, *pos;
if (wq->state != IDXD_WQ_DISABLED) if (wq->state != IDXD_WQ_DISABLED)
return -EPERM; return -EPERM;
...@@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev, ...@@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev,
if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
return -EOPNOTSUPP; return -EOPNOTSUPP;
input = kstrndup(buf, count, GFP_KERNEL);
if (!input)
return -ENOMEM;
pos = strim(input);
memset(wq->name, 0, WQ_NAME_SIZE + 1); memset(wq->name, 0, WQ_NAME_SIZE + 1);
strncpy(wq->name, buf, WQ_NAME_SIZE); sprintf(wq->name, "%s", pos);
strreplace(wq->name, '\n', '\0'); kfree(input);
return count; return count;
} }
......
...@@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev) ...@@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
struct mtk_cqdma_device *cqdma; struct mtk_cqdma_device *cqdma;
struct mtk_cqdma_vchan *vc; struct mtk_cqdma_vchan *vc;
struct dma_device *dd; struct dma_device *dd;
struct resource *res;
int err; int err;
u32 i; u32 i;
...@@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev) ...@@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
return PTR_ERR(cqdma->pc[i]->base); return PTR_ERR(cqdma->pc[i]->base);
/* allocate IRQ resource */ /* allocate IRQ resource */
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			dev_err(&pdev->dev, "No irq resource for %s\n",
				dev_name(&pdev->dev));
			return -EINVAL;
		}
		cqdma->pc[i]->irq = res->start;

		err = platform_get_irq(pdev, i);
		if (err < 0)
			return err;
		cqdma->pc[i]->irq = err;
err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
mtk_cqdma_irq, 0, dev_name(&pdev->dev), mtk_cqdma_irq, 0, dev_name(&pdev->dev),
......
...@@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) ...@@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
cb->flag = 0; cb->flag = 0;
} }
cb->vd = 0; cb->vd = NULL;
/* /*
* Recycle the RXD with the helper WRITE_ONCE that can ensure * Recycle the RXD with the helper WRITE_ONCE that can ensure
...@@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev) ...@@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return PTR_ERR(hsdma->clk); return PTR_ERR(hsdma->clk);
} }
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "No irq resource for %s\n",
			dev_name(&pdev->dev));
		return -EINVAL;
	}
	hsdma->irq = res->start;

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;
	hsdma->irq = err;
refcount_set(&hsdma->pc_refcnt, 0); refcount_set(&hsdma->pc_refcnt, 0);
spin_lock_init(&hsdma->lock); spin_lock_init(&hsdma->lock);
......
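For illustration only (the "foo" names below are invented, not part of this merge): the two MediaTek hunks above and the nbpf hunk further down apply the same conversion from platform_get_resource(pdev, IORESOURCE_IRQ, ...) to platform_get_irq(), which already reports lookup errors and returns a negative errno, including -EPROBE_DEFER.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct foo_dev {
	int irq;			/* hypothetical driver state */
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_request_irq(struct platform_device *pdev, struct foo_dev *fd)
{
	int irq;

	/* platform_get_irq() returns the Linux IRQ number or a negative errno */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* also propagates -EPROBE_DEFER */

	fd->irq = irq;
	return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
				dev_name(&pdev->dev), fd);
}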
...@@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op) ...@@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return PTR_ERR(pdev->base); return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */

	if (of_id) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
					     &dma_channels);
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}
pdev->dma_channels = dma_channels; pdev->dma_channels = dma_channels;
for (i = 0; i < dma_channels; i++) { for (i = 0; i < dma_channels; i++) {
......
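The same new-property-first parsing reappears in the pxa and sprd hunks below. A hedged sketch of the shared idea, with foo_get_dma_channels() being an invented helper name:

#include <linux/of.h>

static u32 foo_get_dma_channels(struct device_node *np, u32 def)
{
	u32 nr;

	/* prefer the generic "dma-channels" property */
	if (!of_property_read_u32(np, "dma-channels", &nr))
		return nr;
	/* fall back to the deprecated "#dma-channels" spelling */
	if (!of_property_read_u32(np, "#dma-channels", &nr))
		return nr;
	return def;			/* neither property present */
}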
...@@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t) ...@@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t)
dma_run_dependencies(&next_pending_sw_desc->async_tx); dma_run_dependencies(&next_pending_sw_desc->async_tx);
/* Lock the channel */ /* Lock the channel */
spin_lock_bh(&xor_dev->lock); spin_lock(&xor_dev->lock);
/* add the SW descriptor to the free descriptors list */ /* add the SW descriptor to the free descriptors list */
list_add(&next_pending_sw_desc->free_list, list_add(&next_pending_sw_desc->free_list,
&xor_dev->free_sw_desc); &xor_dev->free_sw_desc);
/* Release the channel */ /* Release the channel */
spin_unlock_bh(&xor_dev->lock); spin_unlock(&xor_dev->lock);
/* increment the next descriptor */ /* increment the next descriptor */
pending_ptr++; pending_ptr++;
......
...@@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev) ...@@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
struct nbpf_device *nbpf; struct nbpf_device *nbpf;
struct dma_device *dma_dev; struct dma_device *dma_dev;
struct resource *iomem, *irq_res; struct resource *iomem;
const struct nbpf_config *cfg; const struct nbpf_config *cfg;
int num_channels; int num_channels;
int ret, irq, eirq, i; int ret, irq, eirq, i;
...@@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev) ...@@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev)
nbpf->config = cfg; nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res)
			break;

		for (irq = irq_res->start; irq <= irq_res->end;
		     irq++, irqs++)
			irqbuf[irqs] = irq;

		irq = platform_get_irq_optional(pdev, i);
		if (irq < 0 && irq != -ENXIO)
			return irq;
		if (irq > 0)
			irqbuf[irqs++] = irq;
} }
/* /*
......
...@@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) ...@@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
struct plx_dma_desc *desc; struct plx_dma_desc *desc;
u32 flags; u32 flags;
spin_lock_bh(&plxdev->ring_lock); spin_lock(&plxdev->ring_lock);
while (plxdev->tail != plxdev->head) { while (plxdev->tail != plxdev->head) {
desc = plx_dma_get_desc(plxdev, plxdev->tail); desc = plx_dma_get_desc(plxdev, plxdev->tail);
...@@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) ...@@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
plxdev->tail++; plxdev->tail++;
} }
spin_unlock_bh(&plxdev->ring_lock); spin_unlock(&plxdev->ring_lock);
} }
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
......
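The mv_xor_v2 and plx_dma hunks above drop the _bh spinlock variants on what appear to be tasklet-only paths: softirqs are already disabled while a tasklet runs, so plain spin_lock()/spin_unlock() is sufficient there and avoids redundantly disabling and re-enabling bottom halves.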
...@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, ...@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine) struct pt_passthru_engine *pt_engine)
{ {
struct ptdma_desc desc; struct ptdma_desc desc;
struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
cmd_q->cmd_error = 0; cmd_q->cmd_error = 0;
cmd_q->total_pt_ops++; cmd_q->total_pt_ops++;
...@@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, ...@@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
desc.dst_lo = lower_32_bits(pt_engine->dst_dma); desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma); desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
	return pt_core_execute_cmd(&desc, cmd_q);
}

static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
{
	iowrite32(0, pt->cmd_q.reg_control + 0x000C);
}

static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
{
	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
}

	if (cmd_q->int_en)
		pt_core_enable_queue_interrupts(pt);
	else
		pt_core_disable_queue_interrupts(pt);

	return pt_core_execute_cmd(&desc, cmd_q);
}
static void pt_do_cmd_complete(unsigned long data) static void pt_do_cmd_complete(unsigned long data)
...@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data) ...@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
cmd->pt_cmd_callback(cmd->data, cmd->ret); cmd->pt_cmd_callback(cmd->data, cmd->ret);
} }
static irqreturn_t pt_core_irq_handler(int irq, void *data) void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{ {
struct pt_device *pt = data;
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
u32 status; u32 status;
pt_core_disable_queue_interrupts(pt);
pt->total_interrupts++;
status = ioread32(cmd_q->reg_control + 0x0010); status = ioread32(cmd_q->reg_control + 0x0010);
if (status) { if (status) {
cmd_q->int_status = status; cmd_q->int_status = status;
...@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data) ...@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
if ((status & INT_ERROR) && !cmd_q->cmd_error) if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
/* Acknowledge the interrupt */ /* Acknowledge the completion */
iowrite32(status, cmd_q->reg_control + 0x0010); iowrite32(status, cmd_q->reg_control + 0x0010);
pt_core_enable_queue_interrupts(pt);
pt_do_cmd_complete((ulong)&pt->tdata); pt_do_cmd_complete((ulong)&pt->tdata);
} }
}
static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
struct pt_device *pt = data;
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
pt_core_disable_queue_interrupts(pt);
pt->total_interrupts++;
pt_check_status_trans(pt, cmd_q);
pt_core_enable_queue_interrupts(pt);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan, ...@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
vchan_tx_prep(&chan->vc, &desc->vd, flags); vchan_tx_prep(&chan->vc, &desc->vd, flags);
desc->pt = chan->pt; desc->pt = chan->pt;
desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
desc->issued_to_hw = 0; desc->issued_to_hw = 0;
desc->status = DMA_IN_PROGRESS; desc->status = DMA_IN_PROGRESS;
...@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan) ...@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0); pt_cmd_callback(desc, 0);
} }
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct pt_device *pt = to_pt_chan(c)->pt;
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
pt_check_status_trans(pt, cmd_q);
return dma_cookie_status(c, cookie, txstate);
}
static int pt_pause(struct dma_chan *dma_chan) static int pt_pause(struct dma_chan *dma_chan)
{ {
struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_chan *chan = to_pt_chan(dma_chan);
...@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan) ...@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
{ {
struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags; unsigned long flags;
struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head); LIST_HEAD(head);
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
spin_lock_irqsave(&chan->vc.lock, flags); spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head); vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags); spin_unlock_irqrestore(&chan->vc.lock, flags);
...@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt) ...@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy; dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt; dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
dma_dev->device_issue_pending = pt_issue_pending; dma_dev->device_issue_pending = pt_issue_pending;
dma_dev->device_tx_status = dma_cookie_status; dma_dev->device_tx_status = pt_tx_status;
dma_dev->device_pause = pt_pause; dma_dev->device_pause = pt_pause;
dma_dev->device_resume = pt_resume; dma_dev->device_resume = pt_resume;
dma_dev->device_terminate_all = pt_terminate_all; dma_dev->device_terminate_all = pt_terminate_all;
......
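Client-side view of the int_en plumbing above, as a hedged sketch (foo_copy() is illustrative, not from this merge): only descriptors prepared with DMA_PREP_INTERRUPT now arm the PTDMA queue interrupt, while polled clients rely on the new pt_tx_status() path.

#include <linux/dmaengine.h>

static int foo_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* DMA_PREP_INTERRUPT asks for a completion interrupt on this descriptor */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}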
...@@ -206,6 +206,9 @@ struct pt_cmd_queue { ...@@ -206,6 +206,9 @@ struct pt_cmd_queue {
unsigned int active; unsigned int active;
unsigned int suspended; unsigned int suspended;
/* Interrupt flag */
bool int_en;
/* Register addresses for queue */ /* Register addresses for queue */
void __iomem *reg_control; void __iomem *reg_control;
u32 qcontrol; /* Cached control register */ u32 qcontrol; /* Cached control register */
...@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt); ...@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine); struct pt_passthru_engine *pt_engine);
void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
void pt_start_queue(struct pt_cmd_queue *cmd_q); void pt_start_queue(struct pt_cmd_queue *cmd_q);
void pt_stop_queue(struct pt_cmd_queue *cmd_q); void pt_stop_queue(struct pt_cmd_queue *cmd_q);
static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
{
iowrite32(0, pt->cmd_q.reg_control + 0x000C);
}
static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
{
iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
}
#endif #endif
...@@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op) ...@@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op)
of_id = of_match_device(pxad_dt_ids, &op->dev); of_id = of_match_device(pxad_dt_ids, &op->dev);
if (of_id) { if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);

		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(op->dev.of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(op->dev.of_node, "#dma-channels",
					     &dma_channels);

		/* Parse new and deprecated dma-requests properties */
		ret = of_property_read_u32(op->dev.of_node, "dma-requests",
					   &nb_requestors);
		if (ret)
			ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
						   &nb_requestors);
if (ret) { if (ret) {
dev_warn(pdev->slave.dev, dev_warn(pdev->slave.dev,
"#dma-requests set to default 32 as missing in OF: %d", "#dma-requests set to default 32 as missing in OF: %d",
......
...@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc, ...@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN); tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
	if (spi->cmd == SPI_RX)
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
	else
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);

	if (spi->cmd == SPI_RX) {
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
	} else if (spi->cmd == SPI_TX) {
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
	} else { /* SPI_DUPLEX */
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
	}
} }
/* create the dma tre */ /* create the dma tre */
...@@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev) ...@@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev)
{ {
struct gpi_dev *gpi_dev; struct gpi_dev *gpi_dev;
unsigned int i; unsigned int i;
u32 ee_offset;
int ret; int ret;
gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL); gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
...@@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev) ...@@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev)
return ret; return ret;
} }
ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;
gpi_dev->ev_factor = EV_FACTOR; gpi_dev->ev_factor = EV_FACTOR;
ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64)); ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
...@@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev) ...@@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev)
} }
static const struct of_device_id gpi_of_match[] = { static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sdm845-gpi-dma" }, { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sm8150-gpi-dma" }, { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8250-gpi-dma" }, { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
{ }, { },
}; };
MODULE_DEVICE_TABLE(of, gpi_of_match); MODULE_DEVICE_TABLE(of, gpi_of_match);
......
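The EE offset above rides on the standard match-data mechanism; a reduced sketch (the vendor/foo names are invented) of how a per-compatible constant reaches probe():

#include <linux/mod_devicetable.h>
#include <linux/property.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = (void *)0x0 },
	{ .compatible = "vendor,foo-v2", .data = (void *)0x10000 },
	{ }
};

static u32 foo_ee_offset(struct device *dev)
{
	/* device_get_match_data() returns the .data of the matched entry */
	return (uintptr_t)device_get_match_data(dev);
}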
...@@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, ...@@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
struct hidma_desc *mdesc = NULL; struct hidma_desc *mdesc = NULL;
struct hidma_dev *mdma = mchan->dmadev; struct hidma_dev *mdma = mchan->dmadev;
unsigned long irqflags; unsigned long irqflags;
u64 byte_pattern, fill_pattern;
/* Get free descriptor */ /* Get free descriptor */
spin_lock_irqsave(&mchan->lock, irqflags); spin_lock_irqsave(&mchan->lock, irqflags);
...@@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, ...@@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc) if (!mdesc)
return NULL; return NULL;
byte_pattern = (char)value;
fill_pattern = (byte_pattern << 56) |
(byte_pattern << 48) |
(byte_pattern << 40) |
(byte_pattern << 32) |
(byte_pattern << 24) |
(byte_pattern << 16) |
(byte_pattern << 8) |
byte_pattern;
mdesc->desc.flags = flags; mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
value, dest, len, flags, fill_pattern, dest, len, flags,
HIDMA_TRE_MEMSET); HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */ /* Place descriptor in prepared list */
......
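For reference, the chain of shifts above broadcasts the low byte of 'value' into every byte of a 64-bit fill word; a hedged, illustrative sketch that masks to u8 first so no sign extension can leak into the upper bytes:

#include <linux/types.h>

static inline u64 foo_memset_pattern(int value)
{
	u64 b = (u8)value;		/* keep only the low byte */

	/* multiplying by 0x01 repeated per byte replicates it eight times */
	return b * 0x0101010101010101ULL;
}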
...@@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma) ...@@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev) static int sf_pdma_probe(struct platform_device *pdev)
{ {
struct sf_pdma *pdma; struct sf_pdma *pdma;
struct sf_pdma_chan *chan;
struct resource *res; struct resource *res;
int len, chans; int ret, n_chans;
int ret;
const enum dma_slave_buswidth widths = const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
DMA_SLAVE_BUSWIDTH_64_BYTES; DMA_SLAVE_BUSWIDTH_64_BYTES;
	chans = PDMA_NR_CH;
	len = sizeof(*pdma) + sizeof(*chan) * chans;
	pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);

	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
	if (ret) {
		/* backwards-compatibility for no dma-channels property */
		dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
		n_chans = PDMA_MAX_NR_CH;
	} else if (n_chans > PDMA_MAX_NR_CH) {
		dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
		return -EINVAL;
	}

	pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
			    GFP_KERNEL);
if (!pdma) if (!pdma)
return -ENOMEM; return -ENOMEM;
pdma->n_chans = chans; pdma->n_chans = n_chans;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res); pdma->membase = devm_ioremap_resource(&pdev->dev, res);
...@@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev) ...@@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
struct sf_pdma_chan *ch; struct sf_pdma_chan *ch;
int i; int i;
for (i = 0; i < PDMA_NR_CH; i++) { for (i = 0; i < pdma->n_chans; i++) {
ch = &pdma->chans[i]; ch = &pdma->chans[i];
devm_free_irq(&pdev->dev, ch->txirq, ch); devm_free_irq(&pdev->dev, ch->txirq, ch);
...@@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev) ...@@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
static const struct of_device_id sf_pdma_dt_ids[] = { static const struct of_device_id sf_pdma_dt_ids[] = {
{ .compatible = "sifive,fu540-c000-pdma" }, { .compatible = "sifive,fu540-c000-pdma" },
{ .compatible = "sifive,pdma0" },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
......
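The allocation change above (together with the flexible array member declared in the header hunk below) follows the standard struct_size() pattern; a self-contained sketch with invented names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_chan {
	int id;
};

struct foo_dev {
	unsigned int n_chans;
	struct foo_chan chans[];	/* flexible array member, must be last */
};

static struct foo_dev *foo_alloc(unsigned int n_chans)
{
	/* struct_size() = sizeof(*fd) + n_chans * sizeof(fd->chans[0]), overflow-checked */
	struct foo_dev *fd = kzalloc(struct_size(fd, chans, n_chans), GFP_KERNEL);

	if (fd)
		fd->n_chans = n_chans;
	return fd;
}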
...@@ -22,11 +22,7 @@ ...@@ -22,11 +22,7 @@
#include "../dmaengine.h" #include "../dmaengine.h"
#include "../virt-dma.h" #include "../virt-dma.h"
#define PDMA_NR_CH 4 #define PDMA_MAX_NR_CH 4
#if (PDMA_NR_CH != 4)
#error "Please define PDMA_NR_CH to 4"
#endif
#define PDMA_BASE_ADDR 0x3000000 #define PDMA_BASE_ADDR 0x3000000
#define PDMA_CHAN_OFFSET 0x1000 #define PDMA_CHAN_OFFSET 0x1000
...@@ -118,7 +114,7 @@ struct sf_pdma { ...@@ -118,7 +114,7 @@ struct sf_pdma {
void __iomem *membase; void __iomem *membase;
void __iomem *mappedbase; void __iomem *mappedbase;
u32 n_chans; u32 n_chans;
struct sf_pdma_chan chans[PDMA_NR_CH]; struct sf_pdma_chan chans[];
}; };
#endif /* _SF_PDMA_H */ #endif /* _SF_PDMA_H */
...@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC ...@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
config RZ_DMAC config RZ_DMAC
tristate "Renesas RZ/{G2L,V2L} DMA Controller" tristate "Renesas RZ/{G2L,V2L} DMA Controller"
depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST depends on ARCH_RZG2L || COMPILE_TEST
select RENESAS_DMA select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS select DMA_VIRTUAL_CHANNELS
help help
......
...@@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev) ...@@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev)
u32 chn_count; u32 chn_count;
int ret, i; int ret, i;
	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);

	/* Parse new and deprecated dma-channels properties */
	ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
	if (ret)
		ret = device_property_read_u32(&pdev->dev, "#dma-channels",
					       &chn_count);
if (ret) { if (ret) {
dev_err(&pdev->dev, "get dma channels count failed\n"); dev_err(&pdev->dev, "get dma channels count failed\n");
return ret; return ret;
......
...@@ -208,6 +208,7 @@ struct stm32_dma_chan { ...@@ -208,6 +208,7 @@ struct stm32_dma_chan {
u32 threshold; u32 threshold;
u32 mem_burst; u32 mem_burst;
u32 mem_width; u32 mem_width;
enum dma_status status;
}; };
struct stm32_dma_device { struct stm32_dma_device {
...@@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan) ...@@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan)
} }
chan->busy = false; chan->busy = false;
chan->status = DMA_COMPLETE;
} }
static int stm32_dma_terminate_all(struct dma_chan *c) static int stm32_dma_terminate_all(struct dma_chan *c)
...@@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan) ...@@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
} }
static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
{
chan->next_sg++;
if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
chan->next_sg = 0;
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan); static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
...@@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) ...@@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar); stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr); stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
chan->next_sg++; stm32_dma_sg_inc(chan);
/* Clear interrupt status if it is there */ /* Clear interrupt status if it is there */
status = stm32_dma_irq_status(chan); status = stm32_dma_irq_status(chan);
...@@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) ...@@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_dump_reg(chan); stm32_dma_dump_reg(chan);
/* Start DMA */ /* Start DMA */
chan->busy = true;
chan->status = DMA_IN_PROGRESS;
reg->dma_scr |= STM32_DMA_SCR_EN; reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
chan->busy = true;
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
} }
...@@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) ...@@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
id = chan->id; id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
		dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
	} else {
		dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
		dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
	}
}

static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr;

	/*
	 * Read and store current remaining data items and peripheral/memory addresses to be
	 * updated on resume
	 */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

	/*
* Transfer can be paused while between a previous resume and reconfiguration on transfer
* complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need
* to set it here in SCR backup to ensure a good reconfiguration on transfer complete.
*/
if (chan->desc && chan->desc->cyclic) {
if (chan->desc->num_sgs == 1)
dma_scr |= STM32_DMA_SCR_CIRC;
else
dma_scr |= STM32_DMA_SCR_DBM;
}
chan->chan_reg.dma_scr = dma_scr;
/*
* Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
* on resume NDTR autoreload value will be wrong (lower than the initial period length)
*/
if (chan->desc && chan->desc->cyclic) {
dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
}
chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct stm32_dma_sg_req *sg_req;
u32 dma_scr, status, id;
id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
/* Clear interrupt status if it is there */
status = stm32_dma_irq_status(chan);
if (status)
stm32_dma_irq_clear(chan, status);
if (!chan->next_sg)
sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
else
sg_req = &chan->desc->sg_req[chan->next_sg - 1];
/* Reconfigure NDTR with the initial value */
stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
/* Restore SPAR */
stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
/* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
/* Reactivate CIRC/DBM if needed */
if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
dma_scr |= STM32_DMA_SCR_DBM;
/* Restore CT */
if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
dma_scr &= ~STM32_DMA_SCR_CT;
else
dma_scr |= STM32_DMA_SCR_CT;
} else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
dma_scr |= STM32_DMA_SCR_CIRC;
} }
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
stm32_dma_configure_next_sg(chan);
stm32_dma_dump_reg(chan);
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
} }
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
	if (!chan->desc)
		return;

	if (chan->desc->cyclic) {
		vchan_cyclic_callback(&chan->desc->vdesc);
		stm32_dma_sg_inc(chan);
		/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
			stm32_dma_post_resume_reconfigure(chan);
		else if (scr & STM32_DMA_SCR_DBM)
			stm32_dma_configure_next_sg(chan);
	} else {
		chan->busy = false;
		chan->status = DMA_COMPLETE;
		if (chan->next_sg == chan->desc->num_sgs) {
			vchan_cookie_complete(&chan->desc->vdesc);
			chan->desc = NULL;
		}
		stm32_dma_start_transfer(chan);
	}
}
...@@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) ...@@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_TCI) { if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI); stm32_dma_irq_clear(chan, STM32_DMA_TCI);
if (scr & STM32_DMA_SCR_TCIE) if (scr & STM32_DMA_SCR_TCIE) {
stm32_dma_handle_chan_done(chan); if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
stm32_dma_handle_chan_paused(chan);
else
stm32_dma_handle_chan_done(chan, scr);
}
status &= ~STM32_DMA_TCI; status &= ~STM32_DMA_TCI;
} }
...@@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c) ...@@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags); spin_unlock_irqrestore(&chan->vchan.lock, flags);
} }
static int stm32_dma_pause(struct dma_chan *c)
{
struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
unsigned long flags;
int ret;
if (chan->status != DMA_IN_PROGRESS)
return -EPERM;
spin_lock_irqsave(&chan->vchan.lock, flags);
ret = stm32_dma_disable_chan(chan);
/*
* A transfer complete flag is set to indicate the end of transfer due to the stream
* interruption, so wait for interrupt
*/
if (!ret)
chan->status = DMA_PAUSED;
spin_unlock_irqrestore(&chan->vchan.lock, flags);
return ret;
}
static int stm32_dma_resume(struct dma_chan *c)
{
struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
struct stm32_dma_sg_req *sg_req;
unsigned long flags;
if (chan->status != DMA_PAUSED)
return -EPERM;
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
if (WARN_ON(scr & STM32_DMA_SCR_EN))
return -EPERM;
spin_lock_irqsave(&chan->vchan.lock, flags);
/* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */
if (!chan->next_sg)
sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
else
sg_req = &chan->desc->sg_req[chan->next_sg - 1];
ndtr = sg_req->chan_reg.dma_sndtr;
offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
spar = sg_req->chan_reg.dma_spar;
sm0ar = sg_req->chan_reg.dma_sm0ar;
sm1ar = sg_req->chan_reg.dma_sm1ar;
/*
* The peripheral and/or memory addresses have to be updated in order to adjust the
* address pointers. Need to check increment.
*/
if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
else
stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
offset = 0;
/*
* In case of DBM, the current target could be SM1AR.
* Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
* SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
*/
if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
else
stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
/* NDTR must be restored otherwise internal HW counter won't be correctly reset */
stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
/*
* Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
* otherwise NDTR autoreload value will be wrong (lower than the initial period length)
*/
if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
stm32_dma_configure_next_sg(chan);
stm32_dma_dump_reg(chan);
/* The stream may then be re-enabled to restart transfer from the point it was stopped */
chan->status = DMA_IN_PROGRESS;
chan_reg.dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
return 0;
}
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction, enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth, enum dma_slave_buswidth *buswidth,
...@@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic( ...@@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
} }
/* Enable Circular mode or double buffer mode */ /* Enable Circular mode or double buffer mode */
if (buf_len == period_len) if (buf_len == period_len) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC; chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
else } else {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
}
/* Clear periph ctrl if client set it */ /* Clear periph ctrl if client set it */
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
...@@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan) ...@@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{ {
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct stm32_dma_sg_req *sg_req; struct stm32_dma_sg_req *sg_req;
u32 dma_scr, dma_smar, id; u32 dma_scr, dma_smar, id, period_len;
id = chan->id; id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
/* In cyclic CIRC but not DBM, CT is not used */
if (!(dma_scr & STM32_DMA_SCR_DBM)) if (!(dma_scr & STM32_DMA_SCR_DBM))
return true; return true;
sg_req = &chan->desc->sg_req[chan->next_sg]; sg_req = &chan->desc->sg_req[chan->next_sg];
period_len = sg_req->len;
/* DBM - take care of a previous pause/resume not yet post reconfigured */
	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		/*
		 * If transfer has been pause/resumed,
		 * SM0AR is in the range of [SM0AR:SM0AR+period_len]
		 */
		return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
			dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
	/*
	 * If transfer has been pause/resumed,
	 * SM1AR is in the range of [SM1AR:SM1AR+period_len]
	 */
	return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
		dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
} }
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
...@@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, ...@@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan); residue = stm32_dma_get_remaining_bytes(chan);
if (!stm32_dma_is_current_sg(chan)) { if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
n_sg++; n_sg++;
if (n_sg == chan->desc->num_sgs) if (n_sg == chan->desc->num_sgs)
n_sg = 0; n_sg = 0;
...@@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, ...@@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
u32 residue = 0; u32 residue = 0;
status = dma_cookie_status(c, cookie, state); status = dma_cookie_status(c, cookie, state);
if (status == DMA_COMPLETE || !state) if (status == DMA_COMPLETE)
return status;
status = chan->status;
if (!state)
return status; return status;
spin_lock_irqsave(&chan->vchan.lock, flags); spin_lock_irqsave(&chan->vchan.lock, flags);
...@@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev) ...@@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd->device_prep_slave_sg = stm32_dma_prep_slave_sg; dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic; dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
dd->device_config = stm32_dma_slave_config; dd->device_config = stm32_dma_slave_config;
dd->device_pause = stm32_dma_pause;
dd->device_resume = stm32_dma_resume;
dd->device_terminate_all = stm32_dma_terminate_all; dd->device_terminate_all = stm32_dma_terminate_all;
dd->device_synchronize = stm32_dma_synchronize; dd->device_synchronize = stm32_dma_synchronize;
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
...@@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev) ...@@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev)
#endif #endif
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
static int stm32_dma_suspend(struct device *dev) static int stm32_dma_pm_suspend(struct device *dev)
{ {
struct stm32_dma_device *dmadev = dev_get_drvdata(dev); struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
int id, ret, scr; int id, ret, scr;
...@@ -1506,14 +1731,14 @@ static int stm32_dma_suspend(struct device *dev) ...@@ -1506,14 +1731,14 @@ static int stm32_dma_suspend(struct device *dev)
return 0; return 0;
} }
static int stm32_dma_resume(struct device *dev) static int stm32_dma_pm_resume(struct device *dev)
{ {
return pm_runtime_force_resume(dev); return pm_runtime_force_resume(dev);
} }
#endif #endif
static const struct dev_pm_ops stm32_dma_pm_ops = { static const struct dev_pm_ops stm32_dma_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume) SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
stm32_dma_runtime_resume, NULL) stm32_dma_runtime_resume, NULL)
}; };
......
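From a dmaengine client's point of view, the new callbacks above are reached through the generic pause/resume helpers; a hedged usage sketch (the foo_* name is invented):

#include <linux/dmaengine.h>

static int foo_pause_and_resume(struct dma_chan *chan)
{
	int ret;

	/* routed to stm32_dma_pause(): stream disabled, NDTR/addresses saved */
	ret = dmaengine_pause(chan);
	if (ret)
		return ret;

	/* ... client-side housekeeping while the stream is stopped ... */

	/* routed to stm32_dma_resume(): registers restored, stream re-enabled */
	return dmaengine_resume(chan);
}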
...@@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ...@@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = PTR_ERR(rst); ret = PTR_ERR(rst);
if (ret == -EPROBE_DEFER) if (ret == -EPROBE_DEFER)
goto err_clk; goto err_clk;
} else { } else if (count > 1) { /* Don't reset if there is only one dma-master */
reset_control_assert(rst); reset_control_assert(rst);
udelay(2); udelay(2);
reset_control_deassert(rst); reset_control_deassert(rst);
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include "virt-dma.h" #include "virt-dma.h"
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
/* MDMA Channel x interrupt/status register */ /* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
...@@ -73,6 +72,7 @@ ...@@ -73,6 +72,7 @@
#define STM32_MDMA_CCR_WEX BIT(14) #define STM32_MDMA_CCR_WEX BIT(14)
#define STM32_MDMA_CCR_HEX BIT(13) #define STM32_MDMA_CCR_HEX BIT(13)
#define STM32_MDMA_CCR_BEX BIT(12) #define STM32_MDMA_CCR_BEX BIT(12)
#define STM32_MDMA_CCR_SM BIT(8)
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n)) #define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE BIT(5) #define STM32_MDMA_CCR_TCIE BIT(5)
...@@ -168,7 +168,7 @@ ...@@ -168,7 +168,7 @@
#define STM32_MDMA_MAX_BUF_LEN 128 #define STM32_MDMA_MAX_BUF_LEN 128
#define STM32_MDMA_MAX_BLOCK_LEN 65536 #define STM32_MDMA_MAX_BLOCK_LEN 65536
#define STM32_MDMA_MAX_CHANNELS 63 #define STM32_MDMA_MAX_CHANNELS 32
#define STM32_MDMA_MAX_REQUESTS 256 #define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128 #define STM32_MDMA_MAX_BURST 128
#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
...@@ -248,6 +248,7 @@ struct stm32_mdma_device { ...@@ -248,6 +248,7 @@ struct stm32_mdma_device {
u32 nr_channels; u32 nr_channels;
u32 nr_requests; u32 nr_requests;
u32 nr_ahb_addr_masks; u32 nr_ahb_addr_masks;
u32 chan_reserved;
struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
u32 ahb_addr_masks[]; u32 ahb_addr_masks[];
}; };
...@@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) ...@@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{ {
struct stm32_mdma_device *dmadev = devid; struct stm32_mdma_device *dmadev = devid;
struct stm32_mdma_chan *chan = devid; struct stm32_mdma_chan *chan;
u32 reg, id, ccr, ien, status; u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */ /* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
if (status) { if (!status) {
id = __ffs(status); dev_dbg(mdma2dev(dmadev), "spurious it\n");
} else { return IRQ_NONE;
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
if (!status) {
dev_dbg(mdma2dev(dmadev), "spurious it\n");
return IRQ_NONE;
}
id = __ffs(status);
/*
* As GISR0 provides status for channel id from 0 to 31,
* so GISR1 provides status for channel id from 32 to 62
*/
id += 32;
} }
id = __ffs(status);
chan = &dmadev->chan[id]; chan = &dmadev->chan[id];
if (!chan) { if (!chan) {
...@@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) ...@@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
if (!(status & ien)) { if (!(status & ien)) {
spin_unlock(&chan->vchan.lock); spin_unlock(&chan->vchan.lock);
		dev_warn(chan2dev(chan),
			 "spurious it (status=0x%04x, ien=0x%04x)\n",
			 status, ien);

		if (chan->busy)
			dev_warn(chan2dev(chan),
				 "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
		else
			dev_dbg(chan2dev(chan),
				"spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
return IRQ_NONE; return IRQ_NONE;
} }
...@@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c) ...@@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
chan->desc_pool = NULL; chan->desc_pool = NULL;
} }
static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
{
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
/* Check if chan is marked Secure */
if (dmadev->chan_reserved & BIT(chan->id))
return false;
return true;
}
static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma) struct of_dma *ofdma)
{ {
struct stm32_mdma_device *dmadev = ofdma->of_dma_data; struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
dma_cap_mask_t mask = dmadev->ddev.cap_mask;
struct stm32_mdma_chan *chan; struct stm32_mdma_chan *chan;
struct dma_chan *c; struct dma_chan *c;
struct stm32_mdma_chan_config config; struct stm32_mdma_chan_config config;
...@@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, ...@@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
return NULL; return NULL;
} }
c = dma_get_any_slave_channel(&dmadev->ddev); c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
if (!c) { if (!c) {
dev_err(mdma2dev(dmadev), "No more channels available\n"); dev_err(mdma2dev(dmadev), "No more channels available\n");
return NULL; return NULL;
...@@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev) ...@@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
for (i = 0; i < dmadev->nr_channels; i++) { for (i = 0; i < dmadev->nr_channels; i++) {
chan = &dmadev->chan[i]; chan = &dmadev->chan[i];
chan->id = i; chan->id = i;
if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
dmadev->chan_reserved |= BIT(i);
chan->vchan.desc_free = stm32_mdma_desc_free; chan->vchan.desc_free = stm32_mdma_desc_free;
vchan_init(&chan->vchan, dd); vchan_init(&chan->vchan, dd);
} }
......
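The xlate change above swaps dma_get_any_slave_channel() for a filtered request so that channels marked secure are skipped; the generic shape of such a filter, sketched with invented names:

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static bool foo_filter(struct dma_chan *chan, void *param)
{
	unsigned long *reserved = param;	/* bitmap of channels to skip */

	return !test_bit(chan->chan_id, reserved);
}

static struct dma_chan *foo_request_chan(unsigned long *reserved)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, foo_filter, reserved);
}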
...@@ -90,6 +90,14 @@ ...@@ -90,6 +90,14 @@
#define DMA_CHAN_CUR_PARA 0x1c #define DMA_CHAN_CUR_PARA 0x1c
/*
* LLI address mangling
*
* The LLI link physical address is also mangled, but we avoid dealing
* with that by allocating LLIs from the DMA32 zone.
*/
#define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16)
#define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18)
/* /*
* Various hardware related defines * Various hardware related defines
...@@ -132,6 +140,7 @@ struct sun6i_dma_config { ...@@ -132,6 +140,7 @@ struct sun6i_dma_config {
u32 dst_burst_lengths; u32 dst_burst_lengths;
u32 src_addr_widths; u32 src_addr_widths;
u32 dst_addr_widths; u32 dst_addr_widths;
bool has_high_addr;
bool has_mbus_clk; bool has_mbus_clk;
}; };
...@@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) ...@@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
struct sun6i_pchan *pchan) struct sun6i_pchan *pchan)
{ {
phys_addr_t reg = virt_to_phys(pchan->base); dev_dbg(sdev->slave.dev, "Chan %d reg:\n"
dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
"\t___en(%04x): \t0x%08x\n" "\t___en(%04x): \t0x%08x\n"
"\tpause(%04x): \t0x%08x\n" "\tpause(%04x): \t0x%08x\n"
"\tstart(%04x): \t0x%08x\n" "\tstart(%04x): \t0x%08x\n"
...@@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, ...@@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
"\t__dst(%04x): \t0x%08x\n" "\t__dst(%04x): \t0x%08x\n"
"\tcount(%04x): \t0x%08x\n" "\tcount(%04x): \t0x%08x\n"
"\t_para(%04x): \t0x%08x\n\n", "\t_para(%04x): \t0x%08x\n\n",
pchan->idx, &reg, pchan->idx,
DMA_CHAN_ENABLE, DMA_CHAN_ENABLE,
readl(pchan->base + DMA_CHAN_ENABLE), readl(pchan->base + DMA_CHAN_ENABLE),
DMA_CHAN_PAUSE, DMA_CHAN_PAUSE,
...@@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, ...@@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
} }
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
struct sun6i_dma_lli *lli) struct sun6i_dma_lli *v_lli,
dma_addr_t p_lli)
{ {
phys_addr_t p_lli = virt_to_phys(lli);
dev_dbg(chan2dev(&vchan->vc.chan), dev_dbg(chan2dev(&vchan->vc.chan),
"\n\tdesc: p - %pa v - 0x%p\n" "\n\tdesc:\tp - %pad v - 0x%p\n"
"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
&p_lli, lli, &p_lli, v_lli,
lli->cfg, lli->src, lli->dst, v_lli->cfg, v_lli->src, v_lli->dst,
lli->len, lli->para, lli->p_lli_next); v_lli->len, v_lli->para, v_lli->p_lli_next);
} }
static void sun6i_dma_free_desc(struct virt_dma_desc *vd) static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
...@@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) ...@@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
pchan->desc = to_sun6i_desc(&desc->tx); pchan->desc = to_sun6i_desc(&desc->tx);
pchan->done = NULL; pchan->done = NULL;
sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli);
irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
...@@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev, ...@@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev,
return 0; return 0;
} }
static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev,
struct sun6i_dma_lli *v_lli,
dma_addr_t src, dma_addr_t dst)
{
v_lli->src = lower_32_bits(src);
v_lli->dst = lower_32_bits(dst);
if (sdev->cfg->has_high_addr)
v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) |
DST_HIGH_ADDR(upper_32_bits(dst));
}
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags) size_t len, unsigned long flags)
...@@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( ...@@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
if (!txd) if (!txd)
return NULL; return NULL;
v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) { if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_txd_free; goto err_txd_free;
} }
v_lli->src = src;
v_lli->dst = dest;
v_lli->len = len; v_lli->len = len;
v_lli->para = NORMAL_WAIT; v_lli->para = NORMAL_WAIT;
sun6i_dma_set_addr(sdev, v_lli, src, dest);
burst = convert_burst(8); burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
...@@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( ...@@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
sun6i_dma_dump_lli(vchan, v_lli); sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags); return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
...@@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( ...@@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
return NULL; return NULL;
for_each_sg(sgl, sg, sg_len, i) { for_each_sg(sgl, sg, sg_len, i) {
v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) if (!v_lli)
goto err_lli_free; goto err_lli_free;
...@@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( ...@@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
v_lli->para = NORMAL_WAIT; v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) { if (dir == DMA_MEM_TO_DEV) {
v_lli->src = sg_dma_address(sg); sun6i_dma_set_addr(sdev, v_lli,
v_lli->dst = sconfig->dst_addr; sg_dma_address(sg),
sconfig->dst_addr);
v_lli->cfg = lli_cfg; v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
...@@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( ...@@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
sg_dma_len(sg), flags); sg_dma_len(sg), flags);
} else { } else {
v_lli->src = sconfig->src_addr; sun6i_dma_set_addr(sdev, v_lli,
v_lli->dst = sg_dma_address(sg); sconfig->src_addr,
sg_dma_address(sg));
v_lli->cfg = lli_cfg; v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
...@@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( ...@@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
} }
dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
for (prev = txd->v_lli; prev; prev = prev->v_lli_next) for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
sun6i_dma_dump_lli(vchan, prev); p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags); return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free: err_lli_free:
for (prev = txd->v_lli; prev; prev = prev->v_lli_next) for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd); kfree(txd);
return NULL; return NULL;
} }
...@@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( ...@@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return NULL; return NULL;
for (i = 0; i < periods; i++) { for (i = 0; i < periods; i++) {
v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) { if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_lli_free; goto err_lli_free;
...@@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( ...@@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
v_lli->para = NORMAL_WAIT; v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) { if (dir == DMA_MEM_TO_DEV) {
v_lli->src = buf_addr + period_len * i; sun6i_dma_set_addr(sdev, v_lli,
v_lli->dst = sconfig->dst_addr; buf_addr + period_len * i,
sconfig->dst_addr);
v_lli->cfg = lli_cfg; v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
} else { } else {
v_lli->src = sconfig->src_addr; sun6i_dma_set_addr(sdev, v_lli,
v_lli->dst = buf_addr + period_len * i; sconfig->src_addr,
buf_addr + period_len * i);
v_lli->cfg = lli_cfg; v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
...@@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( ...@@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return vchan_tx_prep(&vchan->vc, &txd->vd, flags); return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free: err_lli_free:
for (prev = txd->v_lli; prev; prev = prev->v_lli_next) for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd); kfree(txd);
return NULL; return NULL;
} }
...@@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { ...@@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
}; };
/* /*
* TODO: Add support for more than 4g physical addressing.
*
* The A100 binding uses the number of dma channels from the * The A100 binding uses the number of dma channels from the
* device tree node. * device tree node.
*/ */
...@@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = { ...@@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = {
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
.has_high_addr = true,
.has_mbus_clk = true, .has_mbus_clk = true,
}; };
...@@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = { ...@@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
{ .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
{ .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg }, { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* DMA driver for NVIDIA Tegra GPC DMA controller.
*
* Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <dt-bindings/memory/tegra186-mc.h>
#include "virt-dma.h"
/* CSR register */
#define TEGRA_GPCDMA_CHAN_CSR 0x00
#define TEGRA_GPCDMA_CSR_ENB BIT(31)
#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30)
#define TEGRA_GPCDMA_CSR_ONCE BIT(27)
#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24)
#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \
FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \
FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \
FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \
FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)
#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \
FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)
#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16)
#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \
FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15)
#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10)
/* STATUS register */
#define TEGRA_GPCDMA_CHAN_STATUS 0x004
#define TEGRA_GPCDMA_STATUS_BUSY BIT(31)
#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30)
#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28)
#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27)
#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26)
#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25)
#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24)
#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23)
#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21)
#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20)
#define TEGRA_GPCDMA_CHAN_CSRE 0x008
#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31)
/* Source address */
#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C
/* Destination address */
#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010
/* High address pointer */
#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014
#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0)
#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16)
/* MC sequence register */
#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18
#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31)
#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25)
#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23)
#define TEGRA_GPCDMA_MCSEQ_BURST_2 \
FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
#define TEGRA_GPCDMA_MCSEQ_BURST_16 \
FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20)
#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17)
#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7)
#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0)
/* MMIO sequence register */
#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c
#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \
FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \
FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \
FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27)
#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U
#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U
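/*
 * Note: for a power-of-two burst of bs words the macro below evaluates to
 * (bs - 1) in the BURST field, e.g. a 16-word burst programs 0xf at bit 23.
 */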
#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \
(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19)
#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16)
#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7)
/* Channel WCOUNT */
#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20
/* Transfer count */
#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24
/* DMA byte count status */
#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28
/* Error Status Register */
#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8
#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF
#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \
((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \
TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF
#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE
#define TEGRA_DMA_PERIPH_ID_ERR 0xD
#define TEGRA_DMA_STREAM_ID_ERR 0xC
#define TEGRA_DMA_MC_SLAVE_ERR 0xB
#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA
/* Fixed Pattern */
#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34
#define TEGRA_GPCDMA_CHAN_TZ 0x38
#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0)
#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1)
#define TEGRA_GPCDMA_CHAN_SPARE 0x3c
#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16)
/*
 * If any burst is in flight when the DMA is paused, this is the time needed
 * for the in-flight burst to complete and the DMA status register to update.
*/
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
struct tegra_dma;
struct tegra_dma_channel;
/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size.
 * @max_dma_count: Maximum DMA transfer count supported by the DMA controller.
 * @hw_support_pause: DMA HW engine supports pausing of the channel.
 * @terminate: Callback used to stop/terminate an in-flight transfer on the channel.
*/
struct tegra_dma_chip_data {
bool hw_support_pause;
unsigned int nr_channels;
unsigned int channel_reg_size;
unsigned int max_dma_count;
int (*terminate)(struct tegra_dma_channel *tdc);
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
u32 csr;
u32 src_ptr;
u32 dst_ptr;
u32 high_addr_ptr;
u32 mc_seq;
u32 mmio_seq;
u32 wcount;
u32 fixed_pattern;
};
/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware for
 * one transfer. A client's transfer request may be broken into multiple
 * sub-transfers according to the requester details and hardware support;
 * these sub-transfers are stored as an array in the Tegra DMA descriptor,
 * which manages the overall transfer.
*/
struct tegra_dma_sg_req {
unsigned int len;
struct tegra_dma_channel_regs ch_regs;
};
/*
 * tegra_dma_desc: Tegra DMA descriptor, built on virt_dma_desc, used to
 * manage the client request and keep track of transfer status, callbacks,
 * request counts, etc.
*/
struct tegra_dma_desc {
bool cyclic;
unsigned int bytes_req;
unsigned int bytes_xfer;
unsigned int sg_idx;
unsigned int sg_count;
struct virt_dma_desc vd;
struct tegra_dma_channel *tdc;
struct tegra_dma_sg_req sg_req[];
};
/*
* tegra_dma_channel: Channel specific information
*/
struct tegra_dma_channel {
bool config_init;
char name[30];
enum dma_transfer_direction sid_dir;
int id;
int irq;
int slave_id;
struct tegra_dma *tdma;
struct virt_dma_chan vc;
struct tegra_dma_desc *dma_desc;
struct dma_slave_config dma_sconfig;
unsigned int stream_id;
unsigned long chan_base_offset;
};
/*
* tegra_dma: Tegra DMA specific information
*/
struct tegra_dma {
const struct tegra_dma_chip_data *chip_data;
unsigned long sid_m2d_reserved;
unsigned long sid_d2m_reserved;
void __iomem *base_addr;
struct device *dev;
struct dma_device dma_dev;
struct reset_control *rst;
struct tegra_dma_channel channels[];
};
static inline void tdc_write(struct tegra_dma_channel *tdc,
u32 reg, u32 val)
{
writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
return container_of(dc, struct tegra_dma_channel, vc.chan);
}
static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct tegra_dma_desc, vd);
}
static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
return tdc->vc.chan.device->dev;
}
static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
{
dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
tdc->id, tdc->name);
dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
);
dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
);
dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
}
static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
enum dma_transfer_direction direction)
{
struct tegra_dma *tdma = tdc->tdma;
int sid = tdc->slave_id;
if (!is_slave_direction(direction))
return 0;
switch (direction) {
case DMA_MEM_TO_DEV:
if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
dev_err(tdma->dev, "slave id already in use\n");
return -EINVAL;
}
break;
case DMA_DEV_TO_MEM:
if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
dev_err(tdma->dev, "slave id already in use\n");
return -EINVAL;
}
break;
default:
break;
}
tdc->sid_dir = direction;
return 0;
}
static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
{
struct tegra_dma *tdma = tdc->tdma;
int sid = tdc->slave_id;
switch (tdc->sid_dir) {
case DMA_MEM_TO_DEV:
clear_bit(sid, &tdma->sid_m2d_reserved);
break;
case DMA_DEV_TO_MEM:
clear_bit(sid, &tdma->sid_d2m_reserved);
break;
default:
break;
}
tdc->sid_dir = DMA_TRANS_NONE;
}
static void tegra_dma_desc_free(struct virt_dma_desc *vd)
{
kfree(container_of(vd, struct tegra_dma_desc, vd));
}
static int tegra_dma_slave_config(struct dma_chan *dc,
struct dma_slave_config *sconfig)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
tdc->config_init = true;
return 0;
}
static int tegra_dma_pause(struct tegra_dma_channel *tdc)
{
int ret;
u32 val;
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
/* Wait until busy bit is de-asserted */
ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
val,
!(val & TEGRA_GPCDMA_STATUS_BUSY),
TEGRA_GPCDMA_BURST_COMPLETE_TIME,
TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
if (ret) {
dev_err(tdc2dev(tdc), "DMA pause timed out\n");
tegra_dma_dump_chan_regs(tdc);
}
return ret;
}
static int tegra_dma_device_pause(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
int ret;
if (!tdc->tdma->chip_data->hw_support_pause)
return -ENOSYS;
spin_lock_irqsave(&tdc->vc.lock, flags);
ret = tegra_dma_pause(tdc);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
return ret;
}
static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
u32 val;
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
}
static int tegra_dma_device_resume(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
if (!tdc->tdma->chip_data->hw_support_pause)
return -ENOSYS;
spin_lock_irqsave(&tdc->vc.lock, flags);
tegra_dma_resume(tdc);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
return 0;
}
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
/* Disable interrupts */
csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;
/* Disable DMA */
csr &= ~TEGRA_GPCDMA_CSR_ENB;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
/* Clear interrupt status if it is there */
status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
}
}
static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
{
struct tegra_dma_desc *dma_desc = tdc->dma_desc;
struct tegra_dma_channel_regs *ch_regs;
int ret;
u32 val;
dma_desc->sg_idx++;
/* Reset the sg index for cyclic transfers */
if (dma_desc->sg_idx == dma_desc->sg_count)
dma_desc->sg_idx = 0;
/* Wait until the DMA engine is busy with the current transfer, then configure the next one */
ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
val,
(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
if (ret)
return;
ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
/* Start DMA */
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}
static void tegra_dma_start(struct tegra_dma_channel *tdc)
{
struct tegra_dma_desc *dma_desc = tdc->dma_desc;
struct tegra_dma_channel_regs *ch_regs;
struct virt_dma_desc *vdesc;
if (!dma_desc) {
vdesc = vchan_next_desc(&tdc->vc);
if (!vdesc)
return;
dma_desc = vd_to_tegra_dma_desc(vdesc);
list_del(&vdesc->node);
dma_desc->tdc = tdc;
tdc->dma_desc = dma_desc;
tegra_dma_resume(tdc);
}
ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);
/* Start DMA */
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
}
static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
{
vchan_cookie_complete(&tdc->dma_desc->vd);
tegra_dma_sid_free(tdc);
tdc->dma_desc = NULL;
}
static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
unsigned int err_status)
{
switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
case TEGRA_DMA_BM_FIFO_FULL_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d bm fifo full\n", tdc->id);
break;
case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d peripheral fifo full\n", tdc->id);
break;
case TEGRA_DMA_PERIPH_ID_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d illegal peripheral id\n", tdc->id);
break;
case TEGRA_DMA_STREAM_ID_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d illegal stream id\n", tdc->id);
break;
case TEGRA_DMA_MC_SLAVE_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d mc slave error\n", tdc->id);
break;
case TEGRA_DMA_MMIO_SLAVE_ERR:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d mmio slave error\n", tdc->id);
break;
default:
dev_err(tdc->tdma->dev,
"GPCDMA CH%d security violation %x\n", tdc->id,
err_status);
}
}
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
struct tegra_dma_channel *tdc = dev_id;
struct tegra_dma_desc *dma_desc = tdc->dma_desc;
struct tegra_dma_sg_req *sg_req;
u32 status;
/* Check channel error status register */
status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
if (status) {
tegra_dma_chan_decode_error(tdc, status);
tegra_dma_dump_chan_regs(tdc);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
}
spin_lock(&tdc->vc.lock);
status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
goto irq_done;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
TEGRA_GPCDMA_STATUS_ISE_EOC);
if (!dma_desc)
goto irq_done;
sg_req = dma_desc->sg_req;
dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;
if (dma_desc->cyclic) {
vchan_cyclic_callback(&dma_desc->vd);
tegra_dma_configure_next_sg(tdc);
} else {
dma_desc->sg_idx++;
if (dma_desc->sg_idx == dma_desc->sg_count)
tegra_dma_xfer_complete(tdc);
else
tegra_dma_start(tdc);
}
irq_done:
spin_unlock(&tdc->vc.lock);
return IRQ_HANDLED;
}
static void tegra_dma_issue_pending(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
if (tdc->dma_desc)
return;
spin_lock_irqsave(&tdc->vc.lock, flags);
if (vchan_issue_pending(&tdc->vc))
tegra_dma_start(tdc);
/*
* For cyclic DMA transfers, program the second
* transfer parameters as soon as the first DMA
 * transfer is started, so that the DMA
 * controller triggers the second transfer
* with the correct parameters.
*/
if (tdc->dma_desc && tdc->dma_desc->cyclic)
tegra_dma_configure_next_sg(tdc);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
}
static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
{
int ret;
u32 status, csr;
/*
* Change the client associated with the DMA channel
 * to stop the DMA engine from starting any more bursts for
 * the given client, and wait for in-flight bursts to complete.
*/
csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
/* Wait for in flight data transfer to finish */
udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);
/*
 * If the TX/RX path is still active, wait until it becomes
 * inactive.
 */
ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
tdc->chan_base_offset +
TEGRA_GPCDMA_CHAN_STATUS,
status,
!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
5,
TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
if (ret) {
dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
tegra_dma_dump_chan_regs(tdc);
}
return ret;
}
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
LIST_HEAD(head);
int err;
spin_lock_irqsave(&tdc->vc.lock, flags);
if (tdc->dma_desc) {
err = tdc->tdma->chip_data->terminate(tdc);
if (err) {
spin_unlock_irqrestore(&tdc->vc.lock, flags);
return err;
}
tegra_dma_disable(tdc);
tdc->dma_desc = NULL;
}
tegra_dma_sid_free(tdc);
vchan_get_all_descriptors(&tdc->vc, &head);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
vchan_dma_desc_free_list(&tdc->vc, &head);
return 0;
}
static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
{
struct tegra_dma_desc *dma_desc = tdc->dma_desc;
struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
unsigned int bytes_xfer, residual;
u32 wcount = 0, status;
wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
/*
* Set wcount = 0 if EOC bit is set. The transfer would have
* already completed and the CHAN_XFER_COUNT could have updated
* for the next transfer, specifically in case of cyclic transfers.
*/
status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
wcount = 0;
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
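/*
 * bytes_xfer can exceed bytes_req on cyclic transfers, hence the modulo
 * below: e.g. bytes_req = 4096 and bytes_xfer = 5120 leave a residue of
 * 3072 bytes.
 */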
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
struct virt_dma_desc *vd;
unsigned int residual;
unsigned long flags;
enum dma_status ret;
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&tdc->vc.lock, flags);
vd = vchan_find_desc(&tdc->vc, cookie);
if (vd) {
dma_desc = vd_to_tegra_dma_desc(vd);
residual = dma_desc->bytes_req;
dma_set_residue(txstate, residual);
} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
residual = tegra_dma_get_residual(tdc);
dma_set_residue(txstate, residual);
} else {
dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
}
spin_unlock_irqrestore(&tdc->vc.lock, flags);
return ret;
}
static inline int get_bus_width(struct tegra_dma_channel *tdc,
enum dma_slave_buswidth slave_bw)
{
switch (slave_bw) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
default:
dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
return -EINVAL;
}
}
static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
u32 burst_size, enum dma_slave_buswidth slave_bw,
int len)
{
unsigned int burst_mmio_width, burst_byte;
/*
 * The burst_size from the client is in units of the bus width;
 * convert it into words.
 * If burst_size is not specified by the client, use
 * len to calculate the optimum burst size.
*/
burst_byte = burst_size ? burst_size * slave_bw : len;
burst_mmio_width = burst_byte / 4;
if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
return 0;
burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);
return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
}
static int get_transfer_param(struct tegra_dma_channel *tdc,
enum dma_transfer_direction direction,
u32 *apb_addr,
u32 *mmio_seq,
u32 *csr,
unsigned int *burst_size,
enum dma_slave_buswidth *slave_bw)
{
switch (direction) {
case DMA_MEM_TO_DEV:
*apb_addr = tdc->dma_sconfig.dst_addr;
*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
*burst_size = tdc->dma_sconfig.dst_maxburst;
*slave_bw = tdc->dma_sconfig.dst_addr_width;
*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
return 0;
case DMA_DEV_TO_MEM:
*apb_addr = tdc->dma_sconfig.src_addr;
*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
*burst_size = tdc->dma_sconfig.src_maxburst;
*slave_bw = tdc->dma_sconfig.src_addr_width;
*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
return 0;
default:
dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
}
return -EINVAL;
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
size_t len, unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
struct tegra_dma_sg_req *sg_req;
struct tegra_dma_desc *dma_desc;
u32 csr, mc_seq;
if ((len & 3) || (dest & 3) || len > max_dma_count) {
dev_err(tdc2dev(tdc),
"DMA length/memory address is not supported\n");
return NULL;
}
/* Set DMA mode to fixed pattern */
csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
/* Enable once or continuous mode */
csr |= TEGRA_GPCDMA_CSR_ONCE;
/* Enable IRQ mask */
csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
/* Enable the DMA interrupt */
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_GPCDMA_CSR_IE_EOC;
/* Configure default priority weight for the channel */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
/* Retain the stream-id and clear the rest */
mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
/* Set the address wrapping */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
/* Program outstanding MC requests */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
/* Set burst size */
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
if (!dma_desc)
return NULL;
dma_desc->bytes_req = len;
dma_desc->sg_count = 1;
sg_req = dma_desc->sg_req;
sg_req[0].ch_regs.src_ptr = 0;
sg_req[0].ch_regs.dst_ptr = dest;
sg_req[0].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
sg_req[0].ch_regs.fixed_pattern = value;
/* Word count reg takes value as (N + 1) words */
sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
sg_req[0].ch_regs.csr = csr;
sg_req[0].ch_regs.mmio_seq = 0;
sg_req[0].ch_regs.mc_seq = mc_seq;
sg_req[0].len = len;
dma_desc->cyclic = false;
return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_sg_req *sg_req;
struct tegra_dma_desc *dma_desc;
unsigned int max_dma_count;
u32 csr, mc_seq;
max_dma_count = tdc->tdma->chip_data->max_dma_count;
if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
dev_err(tdc2dev(tdc),
"DMA length/memory address is not supported\n");
return NULL;
}
/* Set DMA mode to memory to memory transfer */
csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
/* Enable once or continuous mode */
csr |= TEGRA_GPCDMA_CSR_ONCE;
/* Enable IRQ mask */
csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
/* Enable the DMA interrupt */
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_GPCDMA_CSR_IE_EOC;
/* Configure default priority weight for the channel */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
/* Retain the stream-id and clear the rest */
mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
/* Set the address wrapping */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
/* Program outstanding MC requests */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
/* Set burst size */
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
if (!dma_desc)
return NULL;
dma_desc->bytes_req = len;
dma_desc->sg_count = 1;
sg_req = dma_desc->sg_req;
sg_req[0].ch_regs.src_ptr = src;
sg_req[0].ch_regs.dst_ptr = dest;
sg_req[0].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
sg_req[0].ch_regs.high_addr_ptr |=
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
/* Word count reg takes value as (N + 1) words */
sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
sg_req[0].ch_regs.csr = csr;
sg_req[0].ch_regs.mmio_seq = 0;
sg_req[0].ch_regs.mc_seq = mc_seq;
sg_req[0].len = len;
dma_desc->cyclic = false;
return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
struct tegra_dma_sg_req *sg_req;
struct tegra_dma_desc *dma_desc;
struct scatterlist *sg;
u32 burst_size;
unsigned int i;
int ret;
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
return NULL;
}
if (sg_len < 1) {
dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
return NULL;
}
ret = tegra_dma_sid_reserve(tdc, direction);
if (ret)
return NULL;
ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
&burst_size, &slave_bw);
if (ret < 0)
return NULL;
/* Enable once or continuous mode */
csr |= TEGRA_GPCDMA_CSR_ONCE;
/* Program the slave id in requestor select */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
/* Enable IRQ mask */
csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
/* Configure default priority weight for the channel */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
/* Enable the DMA interrupt */
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_GPCDMA_CSR_IE_EOC;
mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
/* Retain the stream-id and clear the rest */
mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
/* Set the address wrapping on both MC and MMIO side */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
/* Program 2 MC outstanding requests by default. */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
/* Setting MC burst size depending on MMIO burst size */
if (burst_size == 64)
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
else
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
if (!dma_desc)
return NULL;
dma_desc->sg_count = sg_len;
sg_req = dma_desc->sg_req;
/* Make transfer requests */
for_each_sg(sgl, sg, sg_len, i) {
u32 len;
dma_addr_t mem;
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if ((len & 3) || (mem & 3) || len > max_dma_count) {
dev_err(tdc2dev(tdc),
"DMA length/memory address is not supported\n");
kfree(dma_desc);
return NULL;
}
mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
dma_desc->bytes_req += len;
if (direction == DMA_MEM_TO_DEV) {
sg_req[i].ch_regs.src_ptr = mem;
sg_req[i].ch_regs.dst_ptr = apb_ptr;
sg_req[i].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
} else if (direction == DMA_DEV_TO_MEM) {
sg_req[i].ch_regs.src_ptr = apb_ptr;
sg_req[i].ch_regs.dst_ptr = mem;
sg_req[i].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
}
/*
* Word count register takes input in words. Writing a value
* of N into word count register means a req of (N+1) words.
*/
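/* e.g. len = 64 bytes is 16 words, so wcount is programmed as 15 */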
sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
sg_req[i].ch_regs.csr = csr;
sg_req[i].ch_regs.mmio_seq = mmio_seq;
sg_req[i].ch_regs.mc_seq = mc_seq;
sg_req[i].len = len;
}
dma_desc->cyclic = false;
return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
unsigned int max_dma_count, len, period_count, i;
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
dma_addr_t mem = buf_addr;
int ret;
if (!buf_len || !period_len) {
dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
return NULL;
}
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
return NULL;
}
ret = tegra_dma_sid_reserve(tdc, direction);
if (ret)
return NULL;
/*
 * We only support cyclic transfers when buf_len is a multiple of
* period_len.
*/
if (buf_len % period_len) {
dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
return NULL;
}
len = period_len;
max_dma_count = tdc->tdma->chip_data->max_dma_count;
if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
return NULL;
}
ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
&burst_size, &slave_bw);
if (ret < 0)
return NULL;
/* Enable once or continuous mode */
csr &= ~TEGRA_GPCDMA_CSR_ONCE;
/* Program the slave id in requestor select */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
/* Enable IRQ mask */
csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
/* Configure default priority weight for the channel */
csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
/* Enable the DMA interrupt */
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_GPCDMA_CSR_IE_EOC;
mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
/* Retain the stream-id and clear the rest */
mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
/* Set the address wrapping on both MC and MMIO side */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
/* Program 2 MC outstanding requests by default. */
mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
/* Setting MC burst size depending on MMIO burst size */
if (burst_size == 64)
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
else
mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
period_count = buf_len / period_len;
dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
GFP_NOWAIT);
if (!dma_desc)
return NULL;
dma_desc->bytes_req = buf_len;
dma_desc->sg_count = period_count;
sg_req = dma_desc->sg_req;
/* Split the transfer into period_len-sized chunks */
for (i = 0; i < period_count; i++) {
mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
if (direction == DMA_MEM_TO_DEV) {
sg_req[i].ch_regs.src_ptr = mem;
sg_req[i].ch_regs.dst_ptr = apb_ptr;
sg_req[i].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
} else if (direction == DMA_DEV_TO_MEM) {
sg_req[i].ch_regs.src_ptr = apb_ptr;
sg_req[i].ch_regs.dst_ptr = mem;
sg_req[i].ch_regs.high_addr_ptr =
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
}
/*
* Word count register takes input in words. Writing a value
* of N into word count register means a req of (N+1) words.
*/
sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
sg_req[i].ch_regs.csr = csr;
sg_req[i].ch_regs.mmio_seq = mmio_seq;
sg_req[i].ch_regs.mc_seq = mc_seq;
sg_req[i].len = len;
mem += len;
}
dma_desc->cyclic = true;
return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
}
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int ret;
ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
if (ret) {
dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
return ret;
}
dma_cookie_init(&tdc->vc.chan);
tdc->config_init = false;
return 0;
}
static void tegra_dma_chan_synchronize(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
synchronize_irq(tdc->irq);
vchan_synchronize(&tdc->vc);
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
tegra_dma_terminate_all(dc);
synchronize_irq(tdc->irq);
tasklet_kill(&tdc->vc.task);
tdc->config_init = false;
tdc->slave_id = -1;
tdc->sid_dir = DMA_TRANS_NONE;
free_irq(tdc->irq, tdc);
vchan_free_chan_resources(&tdc->vc);
}
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct tegra_dma *tdma = ofdma->of_dma_data;
struct tegra_dma_channel *tdc;
struct dma_chan *chan;
chan = dma_get_any_slave_channel(&tdma->dma_dev);
if (!chan)
return NULL;
tdc = to_tegra_dma_chan(chan);
tdc->slave_id = dma_spec->args[0];
return chan;
}
static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
.nr_channels = 31,
.channel_reg_size = SZ_64K,
.max_dma_count = SZ_1G,
.hw_support_pause = false,
.terminate = tegra_dma_stop_client,
};
static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.nr_channels = 31,
.channel_reg_size = SZ_64K,
.max_dma_count = SZ_1G,
.hw_support_pause = true,
.terminate = tegra_dma_pause,
};
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
.data = &tegra186_dma_chip_data,
}, {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
{
unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);
tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
return 0;
}
static int tegra_dma_probe(struct platform_device *pdev)
{
const struct tegra_dma_chip_data *cdata = NULL;
struct iommu_fwspec *iommu_spec;
unsigned int stream_id, i;
struct tegra_dma *tdma;
int ret;
cdata = of_device_get_match_data(&pdev->dev);
tdma = devm_kzalloc(&pdev->dev,
struct_size(tdma, channels, cdata->nr_channels),
GFP_KERNEL);
if (!tdma)
return -ENOMEM;
tdma->dev = &pdev->dev;
tdma->chip_data = cdata;
platform_set_drvdata(pdev, tdma);
tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
if (IS_ERR(tdma->rst)) {
return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
"Missing controller reset\n");
}
reset_control_reset(tdma->rst);
tdma->dma_dev.dev = &pdev->dev;
iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
if (!iommu_spec) {
dev_err(&pdev->dev, "Missing iommu stream-id\n");
return -EINVAL;
}
stream_id = iommu_spec->ids[0] & 0xffff;
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
tdc->irq = platform_get_irq(pdev, i);
if (tdc->irq < 0)
return tdc->irq;
tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
i * cdata->channel_reg_size;
snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
tdc->tdma = tdma;
tdc->id = i;
tdc->slave_id = -1;
vchan_init(&tdc->vc, &tdma->dma_dev);
tdc->vc.desc_free = tegra_dma_desc_free;
/* program stream-id for this channel */
tegra_dma_program_sid(tdc, stream_id);
tdc->stream_id = stream_id;
}
dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
/*
* Only word aligned transfers are supported. Set the copy
* alignment shift.
*/
tdma->dma_dev.copy_align = 2;
tdma->dma_dev.fill_align = 2;
tdma->dma_dev.device_alloc_chan_resources =
tegra_dma_alloc_chan_resources;
tdma->dma_dev.device_free_chan_resources =
tegra_dma_free_chan_resources;
tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
tdma->dma_dev.device_pause = tegra_dma_device_pause;
tdma->dma_dev.device_resume = tegra_dma_device_resume;
tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret,
"GPC DMA driver registration failed\n");
return ret;
}
ret = of_dma_controller_register(pdev->dev.of_node,
tegra_dma_of_xlate, tdma);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret,
"GPC DMA OF registration failed\n");
dma_async_device_unregister(&tdma->dma_dev);
return ret;
}
dev_info(&pdev->dev, "GPC DMA driver register %d channels\n",
cdata->nr_channels);
return 0;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
return 0;
}
static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
unsigned int i;
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
if (tdc->dma_desc) {
dev_err(tdma->dev, "channel %u busy\n", i);
return -EBUSY;
}
}
return 0;
}
static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
unsigned int i;
reset_control_reset(tdma->rst);
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
tegra_dma_program_sid(tdc, tdc->stream_id);
}
return 0;
}
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
static struct platform_driver tegra_dma_driver = {
.driver = {
.name = "tegra-gpcdma",
.pm = &tegra_dma_dev_pm_ops,
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
.remove = tegra_dma_remove,
};
module_platform_driver(tegra_dma_driver);
MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
MODULE_LICENSE("GPL");
...@@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev) ...@@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->qmgr_num_pend = glue_info->qmgr_num_pend; cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
cdd->first_completion_queue = glue_info->first_completion_queue; cdd->first_completion_queue = glue_info->first_completion_queue;
/* Parse new and deprecated dma-channels properties */
ret = of_property_read_u32(dev->of_node, ret = of_property_read_u32(dev->of_node,
"#dma-channels", &cdd->n_chans); "dma-channels", &cdd->n_chans);
if (ret)
ret = of_property_read_u32(dev->of_node,
"#dma-channels", &cdd->n_chans);
if (ret) if (ret)
goto err_get_n_chans; goto err_get_n_chans;
......
...@@ -70,10 +70,10 @@ ...@@ -70,10 +70,10 @@
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep am62_src_ep_map[] = { static struct psil_ep am62_src_ep_map[] = {
/* SAUL */ /* SAUL */
PSIL_SAUL(0x7500, 20, 35, 8, 35, 0), PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
PSIL_SAUL(0x7501, 21, 35, 8, 36, 0), PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7502, 22, 43, 8, 43, 0), PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7503, 23, 43, 8, 44, 0), PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */ /* PDMA_MAIN0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4303),
......
...@@ -229,7 +229,7 @@ struct zynqmp_dma_chan { ...@@ -229,7 +229,7 @@ struct zynqmp_dma_chan {
bool is_dmacoherent; bool is_dmacoherent;
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
bool idle; bool idle;
u32 desc_size; size_t desc_size;
bool err; bool err;
u32 bus_width; u32 bus_width;
u32 src_burst_len; u32 src_burst_len;
...@@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) ...@@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
} }
chan->desc_pool_v = dma_alloc_coherent(chan->dev, chan->desc_pool_v = dma_alloc_coherent(chan->dev,
(2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL); &chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v) if (!chan->desc_pool_v)
return -ENOMEM; return -ENOMEM;
...@@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) ...@@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
pm_runtime_use_autosuspend(zdev->dev); pm_runtime_use_autosuspend(zdev->dev);
pm_runtime_enable(zdev->dev); pm_runtime_enable(zdev->dev);
pm_runtime_get_sync(zdev->dev); ret = pm_runtime_resume_and_get(zdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "device wakeup failed.\n");
pm_runtime_disable(zdev->dev);
}
if (!pm_runtime_enabled(zdev->dev)) { if (!pm_runtime_enabled(zdev->dev)) {
ret = zynqmp_dma_runtime_resume(zdev->dev); ret = zynqmp_dma_runtime_resume(zdev->dev);
if (ret) if (ret)
...@@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) ...@@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
p->src_addr_widths = BIT(zdev->chan->bus_width / 8); p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
dma_async_device_register(&zdev->common); ret = dma_async_device_register(&zdev->common);
if (ret) {
dev_err(zdev->dev, "failed to register the dma device\n");
goto free_chan_resources;
}
ret = of_dma_controller_register(pdev->dev.of_node, ret = of_dma_controller_register(pdev->dev.of_node,
of_zynqmp_dma_xlate, zdev); of_zynqmp_dma_xlate, zdev);
......
...@@ -870,7 +870,6 @@ struct dma_device { ...@@ -870,7 +870,6 @@ struct dma_device {
struct device *dev; struct device *dev;
struct module *owner; struct module *owner;
struct ida chan_ida; struct ida chan_ida;
struct mutex chan_mutex; /* to protect chan_ida */
u32 src_addr_widths; u32 src_addr_widths;
u32 dst_addr_widths; u32 dst_addr_widths;
...@@ -1031,6 +1030,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( ...@@ -1031,6 +1030,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags); return chan->device->device_prep_interleaved_dma(chan, xt, flags);
} }
/**
* dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
* @chan: The channel to be used for this descriptor
* @dest: Address of buffer to be set
* @value: Treated as a single byte value that fills the destination buffer
* @len: The total size of dest
* @flags: DMA engine flags
*/
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len, struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags) unsigned long flags)
......
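The kernel-doc added above makes explicit that @value is consumed as a single byte pattern. A minimal client sketch of the intended call sequence follows; the mask-based channel request, the 0xa5 fill pattern and the error handling are illustrative assumptions and not part of this patch:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Fill a DMA-mapped buffer at 'dst' with the byte 0xa5 using any DMA_MEMSET channel */
static int example_dma_fill(dma_addr_t dst, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMSET, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Only the lowest byte of 'value' is used to fill the destination */
	tx = dmaengine_prep_dma_memset(chan, dst, 0xa5, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);

	/* For simplicity, poll for completion before releasing the channel */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		dma_release_channel(chan);
		return -ETIMEDOUT;
	}

	dma_release_channel(chan);
	return 0;
}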
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
#ifdef CONFIG_CLK_R9A06G032
int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
#else
static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
#endif
#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
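A consumer such as the RZ/N1 dmamux driver is expected to flip per-request-line routing bits through this helper; the bit semantics and header path used below are assumptions for illustration only, not defined by this header:

#include <linux/bits.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>

/* Hypothetical wrapper: route dmamux request line 'line' to its alternate source */
static int rzn1_dmamux_route(unsigned int line, bool use_alt)
{
	return r9a06g032_sysctrl_set_dmamux(BIT(line), use_alt ? BIT(line) : 0);
}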
...@@ -53,6 +53,11 @@ enum idxd_scmd_stat { ...@@ -53,6 +53,11 @@ enum idxd_scmd_stat {
/* IAX */ /* IAX */
#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000 #define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000
#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000
#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000
#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000
#define IDXD_OP_FLAG_SRC2_STS 0x100000
#define IDXD_OP_FLAG_CRC_RFC3720 0x200000
/* Opcode */ /* Opcode */
enum dsa_opcode { enum dsa_opcode {
...@@ -81,6 +86,18 @@ enum iax_opcode { ...@@ -81,6 +86,18 @@ enum iax_opcode {
IAX_OPCODE_MEMMOVE, IAX_OPCODE_MEMMOVE,
IAX_OPCODE_DECOMPRESS = 0x42, IAX_OPCODE_DECOMPRESS = 0x42,
IAX_OPCODE_COMPRESS, IAX_OPCODE_COMPRESS,
IAX_OPCODE_CRC64,
IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
IAX_OPCODE_ZERO_DECOMP_16,
IAX_OPCODE_DECOMP_32 = 0x4c,
IAX_OPCODE_DECOMP_16,
IAX_OPCODE_SCAN = 0x50,
IAX_OPCODE_SET_MEMBER,
IAX_OPCODE_EXTRACT,
IAX_OPCODE_SELECT,
IAX_OPCODE_RLE_BURST,
IAX_OPCDE_FIND_UNIQUE,
IAX_OPCODE_EXPAND,
}; };
/* Completion record status */ /* Completion record status */
...@@ -120,6 +137,7 @@ enum iax_completion_status { ...@@ -120,6 +137,7 @@ enum iax_completion_status {
IAX_COMP_NONE = 0, IAX_COMP_NONE = 0,
IAX_COMP_SUCCESS, IAX_COMP_SUCCESS,
IAX_COMP_PAGE_FAULT_IR = 0x04, IAX_COMP_PAGE_FAULT_IR = 0x04,
IAX_COMP_ANALYTICS_ERROR = 0x0a,
IAX_COMP_OUTBUF_OVERFLOW, IAX_COMP_OUTBUF_OVERFLOW,
IAX_COMP_BAD_OPCODE = 0x10, IAX_COMP_BAD_OPCODE = 0x10,
IAX_COMP_INVALID_FLAGS, IAX_COMP_INVALID_FLAGS,
...@@ -140,7 +158,10 @@ enum iax_completion_status { ...@@ -140,7 +158,10 @@ enum iax_completion_status {
IAX_COMP_WATCHDOG, IAX_COMP_WATCHDOG,
IAX_COMP_INVALID_COMP_FLAG = 0x30, IAX_COMP_INVALID_COMP_FLAG = 0x30,
IAX_COMP_INVALID_FILTER_FLAG, IAX_COMP_INVALID_FILTER_FLAG,
IAX_COMP_INVALID_NUM_ELEMS = 0x33, IAX_COMP_INVALID_INPUT_SIZE,
IAX_COMP_INVALID_NUM_ELEMS,
IAX_COMP_INVALID_SRC1_WIDTH,
IAX_COMP_INVALID_INVERT_OUT,
}; };
#define DSA_COMP_STATUS_MASK 0x7f #define DSA_COMP_STATUS_MASK 0x7f
...@@ -319,8 +340,12 @@ struct iax_completion_record { ...@@ -319,8 +340,12 @@ struct iax_completion_record {
uint32_t output_size; uint32_t output_size;
uint8_t output_bits; uint8_t output_bits;
uint8_t rsvd3; uint8_t rsvd3;
uint16_t rsvd4; uint16_t xor_csum;
uint64_t rsvd5[4]; uint32_t crc;
uint32_t min;
uint32_t max;
uint32_t sum;
uint64_t rsvd4[2];
} __attribute__((packed)); } __attribute__((packed));
struct iax_raw_completion_record { struct iax_raw_completion_record {
......