author     Linus Torvalds <torvalds@linux-foundation.org>  2023-09-03 10:49:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-09-03 10:49:42 -0700
commit     708283abf896dd4853e673cc8cba70acaf9bf4ea (patch)
tree       645b11e464c36de9a170d76bd3d97a49e2371722
parent     db906f0ca6bb55b7237b880e06ec2fc95ab67e16 (diff)
parent     72f5801a4e2b7122ed8ff5672ea965a0b3458e6b (diff)
Merge tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
 "New controller support and updates to drivers.

  New support:
   - Qualcomm SM6115 and QCM2290 dmaengine support
   - at_xdma support for microchip,sam9x7 controller

  Updates:
   - idxd updates for wq simplification and ats knob updates
   - fsl edma updates for v3 support
   - Xilinx AXI4-Stream control support
   - Yaml conversion for bcm dma binding"

* tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (53 commits)
  dmaengine: fsl-edma: integrate v3 support
  dt-bindings: fsl-dma: fsl-edma: add edma3 compatible string
  dmaengine: fsl-edma: move tcd into struct fsl_dma_chan
  dmaengine: fsl-edma: refactor chan_name setup and safety
  dmaengine: fsl-edma: move clearing of register interrupt into setup_irq function
  dmaengine: fsl-edma: refactor using devm_clk_get_enabled
  dmaengine: fsl-edma: simply ATTR_DSIZE and ATTR_SSIZE by using ffs()
  dmaengine: fsl-edma: move common IRQ handler to common.c
  dmaengine: fsl-edma: Remove enum edma_version
  dmaengine: fsl-edma: transition from bool fields to bitmask flags in drvdata
  dmaengine: fsl-edma: clean up EXPORT_SYMBOL_GPL in fsl-edma-common.c
  dmaengine: fsl-edma: fix build error when arch is s390
  dmaengine: idxd: Fix issues with PRS disable sysfs knob
  dmaengine: idxd: Allow ATS disable update only for configurable devices
  dmaengine: xilinx_dma: Program interrupt delay timeout
  dmaengine: xilinx_dma: Use tasklet_hi_schedule for timing critical usecase
  dmaengine: xilinx_dma: Freeup active list based on descriptor completion bit
  dmaengine: xilinx_dma: Increase AXI DMA transaction segment count
  dmaengine: xilinx_dma: Pass AXI4-Stream control words to dma client
  dt-bindings: dmaengine: xilinx_dma: Add xlnx,irq-delay property
  ...
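
A note on the "simply ATTR_DSIZE and ATTR_SSIZE by using ffs()" entry above: the eDMA TCD size fields encode log2 of the bus width in bytes, so ffs(width) - 1 yields the field value directly, and the same value goes into both DSIZE (bits 2:0) and SSIZE (bits 10:8). A minimal standalone sketch of that mapping (userspace C for illustration only, not the driver code):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same encoding as fsl_edma_get_tcd_attr(): log2(width) in DSIZE and SSIZE. */
static unsigned int tcd_attr(unsigned int width_bytes)
{
	unsigned int val = ffs(width_bytes) - 1;

	return val | (val << 8);
}

int main(void)
{
	unsigned int w;

	/* Prints 0x0000, 0x0101, 0x0202, 0x0303, 0x0404, 0x0505 for 1..32 bytes. */
	for (w = 1; w <= 32; w <<= 1)
		printf("width %2u -> attr 0x%04x\n", w, tcd_attr(w));
	return 0;
}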
-rw-r--r-- Documentation/ABI/stable/sysfs-driver-dma-idxd | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/atmel-xdma.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt | 83
-rw-r--r-- Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml | 102
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl,edma.yaml | 106
-rw-r--r-- Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml | 31
-rw-r--r-- Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 6
-rw-r--r-- drivers/dma/Kconfig | 21
-rw-r--r-- drivers/dma/Makefile | 7
-rw-r--r-- drivers/dma/apple-admac.c | 3
-rw-r--r-- drivers/dma/at_hdmac.c | 2
-rw-r--r-- drivers/dma/bcm-sba-raid.c | 4
-rw-r--r-- drivers/dma/bestcomm/bestcomm.c | 3
-rw-r--r-- drivers/dma/dma-jz4780.c | 1
-rw-r--r-- drivers/dma/dmaengine.c | 82
-rw-r--r-- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 1
-rw-r--r-- drivers/dma/dw/rzn1-dmamux.c | 4
-rw-r--r-- drivers/dma/ep93xx_dma.c | 4
-rw-r--r-- drivers/dma/fsl-edma-common.c | 307
-rw-r--r-- drivers/dma/fsl-edma-common.h | 127
-rw-r--r-- drivers/dma/fsl-edma-main.c (renamed from drivers/dma/fsl-edma.c) | 313
-rw-r--r-- drivers/dma/fsl-qdma.c | 4
-rw-r--r-- drivers/dma/fsl_raid.c | 3
-rw-r--r-- drivers/dma/fsldma.c | 3
-rw-r--r-- drivers/dma/idxd/device.c | 2
-rw-r--r-- drivers/dma/idxd/idxd.h | 5
-rw-r--r-- drivers/dma/idxd/perfmon.c | 7
-rw-r--r-- drivers/dma/idxd/sysfs.c | 33
-rw-r--r-- drivers/dma/img-mdc-dma.c | 1
-rw-r--r-- drivers/dma/imx-dma.c | 2
-rw-r--r-- drivers/dma/imx-sdma.c | 1
-rw-r--r-- drivers/dma/ioat/dca.c | 2
-rw-r--r-- drivers/dma/ioat/dma.h | 1
-rw-r--r-- drivers/dma/ioat/init.c | 19
-rw-r--r-- drivers/dma/ipu/Makefile | 2
-rw-r--r-- drivers/dma/ipu/ipu_idmac.c | 1801
-rw-r--r-- drivers/dma/ipu/ipu_intern.h | 173
-rw-r--r-- drivers/dma/ipu/ipu_irq.c | 367
-rw-r--r-- drivers/dma/lgm/lgm-dma.c | 7
-rw-r--r-- drivers/dma/lpc18xx-dmamux.c | 4
-rw-r--r-- drivers/dma/mcf-edma-main.c (renamed from drivers/dma/mcf-edma.c) | 43
-rw-r--r-- drivers/dma/mediatek/mtk-cqdma.c | 1
-rw-r--r-- drivers/dma/mediatek/mtk-hsdma.c | 1
-rw-r--r-- drivers/dma/mediatek/mtk-uart-apdma.c | 2
-rw-r--r-- drivers/dma/mpc512x_dma.c | 4
-rw-r--r-- drivers/dma/mxs-dma.c | 1
-rw-r--r-- drivers/dma/nbpfaxi.c | 1
-rw-r--r-- drivers/dma/owl-dma.c | 5
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 2
-rw-r--r-- drivers/dma/qcom/gpi.c | 3
-rw-r--r-- drivers/dma/qcom/hidma.c | 14
-rw-r--r-- drivers/dma/qcom/hidma_mgmt.c | 5
-rw-r--r-- drivers/dma/sh/rz-dmac.c | 17
-rw-r--r-- drivers/dma/sh/shdmac.c | 8
-rw-r--r-- drivers/dma/sprd-dma.c | 2
-rw-r--r-- drivers/dma/ste_dma40.c | 4
-rw-r--r-- drivers/dma/stm32-dma.c | 3
-rw-r--r-- drivers/dma/stm32-dmamux.c | 4
-rw-r--r-- drivers/dma/stm32-mdma.c | 1
-rw-r--r-- drivers/dma/sun6i-dma.c | 2
-rw-r--r-- drivers/dma/tegra186-gpc-dma.c | 2
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 1
-rw-r--r-- drivers/dma/tegra210-adma.c | 3
-rw-r--r-- drivers/dma/ti/dma-crossbar.c | 5
-rw-r--r-- drivers/dma/ti/edma.c | 1
-rw-r--r-- drivers/dma/ti/k3-udma-private.c | 2
-rw-r--r-- drivers/dma/ti/k3-udma.c | 1
-rw-r--r-- drivers/dma/ti/omap-dma.c | 2
-rw-r--r-- drivers/dma/xgene-dma.c | 3
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 74
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 3
71 files changed, 998 insertions(+), 2871 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index 534b7a3d59fc59..825e619250bf2e 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -84,7 +84,7 @@ What: /sys/bus/dsa/devices/dsa<m>/pasid_enabled
Date: Oct 27, 2020
KernelVersion: 5.11.0
Contact: dmaengine@vger.kernel.org
-Description: To indicate if PASID (process address space identifier) is
+Description: To indicate if user PASID (process address space identifier) is
enabled or not for this device.
What: /sys/bus/dsa/devices/dsa<m>/state
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
index 510b7f25ba2423..76d649b3a25dc0 100644
--- a/Documentation/devicetree/bindings/dma/atmel-xdma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -3,7 +3,8 @@
* XDMA Controller
Required properties:
- compatible: Should be "atmel,sama5d4-dma", "microchip,sam9x60-dma" or
- "microchip,sama7g5-dma".
+ "microchip,sama7g5-dma" or
+ "microchip,sam9x7-dma", "atmel,sama5d4-dma".
- reg: Should contain DMA registers location and length.
- interrupts: Should contain DMA interrupt.
- #dma-cells: Must be <1>, used to represent the number of integer cells in
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
deleted file mode 100644
index b6a8cc0978cd5f..00000000000000
--- a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-* BCM2835 DMA controller
-
-The BCM2835 DMA controller has 16 channels in total.
-Only the lower 13 channels have an associated IRQ.
-Some arbitrary channels are used by the firmware
-(1,3,6,7 in the current firmware version).
-The channels 0,2 and 3 have special functionality
-and should not be used by the driver.
-
-Required properties:
-- compatible: Should be "brcm,bcm2835-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain the DMA interrupts associated
- to the DMA channels in ascending order.
-- interrupt-names: Should contain the names of the interrupt
- in the form "dmaXX".
- Use "dma-shared-all" for the common interrupt line
- that is shared by all dma channels.
-- #dma-cells: Must be <1>, the cell in the dmas property of the
- client device represents the DREQ number.
-- brcm,dma-channel-mask: Bit mask representing the channels
- not used by the firmware in ascending order,
- i.e. first channel corresponds to LSB.
-
-Example:
-
-dma: dma@7e007000 {
- compatible = "brcm,bcm2835-dma";
- reg = <0x7e007000 0xf00>;
- interrupts = <1 16>,
- <1 17>,
- <1 18>,
- <1 19>,
- <1 20>,
- <1 21>,
- <1 22>,
- <1 23>,
- <1 24>,
- <1 25>,
- <1 26>,
- /* dma channel 11-14 share one irq */
- <1 27>,
- <1 27>,
- <1 27>,
- <1 27>,
- /* unused shared irq for all channels */
- <1 28>;
- interrupt-names = "dma0",
- "dma1",
- "dma2",
- "dma3",
- "dma4",
- "dma5",
- "dma6",
- "dma7",
- "dma8",
- "dma9",
- "dma10",
- "dma11",
- "dma12",
- "dma13",
- "dma14",
- "dma-shared-all";
-
- #dma-cells = <1>;
- brcm,dma-channel-mask = <0x7f35>;
-};
-
-
-DMA clients connected to the BCM2835 DMA controller must use the format
-described in the dma.txt file, using a two-cell specifier for each channel.
-
-Example:
-
-bcm2835_i2s: i2s@7e203000 {
- compatible = "brcm,bcm2835-i2s";
- reg = < 0x7e203000 0x24>;
- clocks = <&clocks BCM2835_CLOCK_PCM>;
-
- dmas = <&dma 2>,
- <&dma 3>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml
new file mode 100644
index 00000000000000..c9b9a5490826cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/brcm,bcm2835-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 DMA controller
+
+maintainers:
+ - Nicolas Saenz Julienne <nsaenz@kernel.org>
+
+description:
+ The BCM2835 DMA controller has 16 channels in total. Only the lower
+ 13 channels have an associated IRQ. Some arbitrary channels are used by the
+ VideoCore firmware (1,3,6,7 in the current firmware version). The channels
+ 0, 2 and 3 have special functionality and should not be used by the driver.
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm2835-dma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Should contain the DMA interrupts associated to the DMA channels in
+ ascending order.
+ minItems: 1
+ maxItems: 16
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 16
+
+ '#dma-cells':
+ description: The single cell represents the DREQ number.
+ const: 1
+
+ brcm,dma-channel-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Bitmask of available DMA channels in ascending order that are
+ not reserved by firmware and are available to the
+ kernel. i.e. first channel corresponds to LSB.
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#dma-cells"
+ - brcm,dma-channel-mask
+
+examples:
+ - |
+ dma-controller@7e007000 {
+ compatible = "brcm,bcm2835-dma";
+ reg = <0x7e007000 0xf00>;
+ interrupts = <1 16>,
+ <1 17>,
+ <1 18>,
+ <1 19>,
+ <1 20>,
+ <1 21>,
+ <1 22>,
+ <1 23>,
+ <1 24>,
+ <1 25>,
+ <1 26>,
+ /* dma channel 11-14 share one irq */
+ <1 27>,
+ <1 27>,
+ <1 27>,
+ <1 27>,
+ /* unused shared irq for all channels */
+ <1 28>;
+ interrupt-names = "dma0",
+ "dma1",
+ "dma2",
+ "dma3",
+ "dma4",
+ "dma5",
+ "dma6",
+ "dma7",
+ "dma8",
+ "dma9",
+ "dma10",
+ "dma11",
+ "dma12",
+ "dma13",
+ "dma14",
+ "dma-shared-all";
+ #dma-cells = <1>;
+ brcm,dma-channel-mask = <0x7f35>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index 5fd8fc6042616b..437db0c62339fa 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -21,32 +21,41 @@ properties:
- enum:
- fsl,vf610-edma
- fsl,imx7ulp-edma
+ - fsl,imx8qm-adma
+ - fsl,imx8qm-edma
+ - fsl,imx93-edma3
+ - fsl,imx93-edma4
- items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
reg:
- minItems: 2
+ minItems: 1
maxItems: 3
interrupts:
- minItems: 2
- maxItems: 17
+ minItems: 1
+ maxItems: 64
interrupt-names:
- minItems: 2
- maxItems: 17
+ minItems: 1
+ maxItems: 64
"#dma-cells":
- const: 2
+ enum:
+ - 2
+ - 3
dma-channels:
- const: 32
+ minItems: 1
+ maxItems: 64
clocks:
+ minItems: 1
maxItems: 2
clock-names:
+ minItems: 1
maxItems: 2
big-endian:
@@ -69,21 +78,52 @@ allOf:
properties:
compatible:
contains:
+ enum:
+ - fsl,imx8qm-adma
+ - fsl,imx8qm-edma
+ - fsl,imx93-edma3
+ - fsl,imx93-edma4
+ then:
+ properties:
+ "#dma-cells":
+ const: 3
+ # It is not necessary to write the interrupt name for each channel.
+ # instead, you can simply maintain the sequential IRQ numbers as
+ # defined for the DMA channels.
+ interrupt-names: false
+ clock-names:
+ items:
+ - const: dma
+ clocks:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: fsl,vf610-edma
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: dmamux0
- const: dmamux1
interrupts:
+ minItems: 2
maxItems: 2
interrupt-names:
items:
- const: edma-tx
- const: edma-err
reg:
+ minItems: 2
maxItems: 3
+ "#dma-cells":
+ const: 2
+ dma-channels:
+ const: 32
- if:
properties:
@@ -92,14 +132,22 @@ allOf:
const: fsl,imx7ulp-edma
then:
properties:
+ clock:
+ minItems: 2
clock-names:
items:
- const: dma
- const: dmamux0
interrupts:
+ minItems: 2
maxItems: 17
reg:
+ minItems: 2
maxItems: 2
+ "#dma-cells":
+ const: 2
+ dma-channels:
+ const: 32
unevaluatedProperties: false
@@ -153,3 +201,47 @@ examples:
clock-names = "dma", "dmamux0";
clocks = <&pcc2 IMX7ULP_CLK_DMA1>, <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx93-clock.h>
+
+ dma-controller@44000000 {
+ compatible = "fsl,imx93-edma3";
+ reg = <0x44000000 0x200000>;
+ #dma-cells = <3>;
+ dma-channels = <31>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX93_CLK_EDMA1_GATE>;
+ clock-names = "dma";
+ };
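
For the three-cell #dma-cells form added above, the driver side of this pull (fsl_edma3_xlate() in fsl-edma-main.c below) interprets the specifier as <request priority flags>: the first cell is the DMA request/channel number, the second the channel priority, and in the third cell bit 0 marks an RX channel, bit 1 remote access and bit 2 multi-FIFO operation.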
diff --git a/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
index 4f5510b6fa0211..3ad0d9b1fbc5e4 100644
--- a/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
@@ -15,13 +15,19 @@ allOf:
properties:
compatible:
- enum:
- # APQ8064, IPQ8064 and MSM8960
- - qcom,bam-v1.3.0
- # MSM8974, APQ8074 and APQ8084
- - qcom,bam-v1.4.0
- # MSM8916 and SDM845
- - qcom,bam-v1.7.0
+ oneOf:
+ - enum:
+ # APQ8064, IPQ8064 and MSM8960
+ - qcom,bam-v1.3.0
+ # MSM8974, APQ8074 and APQ8084
+ - qcom,bam-v1.4.0
+ # MSM8916, SDM630
+ - qcom,bam-v1.7.0
+ - items:
+ - enum:
+ # SDM845, SM6115, SM8150, SM8250 and QCM2290
+ - qcom,bam-v1.7.4
+ - const: qcom,bam-v1.7.0
clocks:
maxItems: 1
@@ -38,7 +44,7 @@ properties:
iommus:
minItems: 1
- maxItems: 4
+ maxItems: 6
num-channels:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -81,6 +87,15 @@ required:
- qcom,ee
- reg
+anyOf:
+ - required:
+ - qcom,powered-remotely
+ - required:
+ - qcom,controlled-remotely
+ - required:
+ - clocks
+ - clock-names
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index d1700a5c36bf83..590d1948f202c6 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -49,6 +49,12 @@ Optional properties for AXI DMA and MCDMA:
register as configured in h/w. Takes values {8...26}. If the property
is missing or invalid then the default value 23 is used. This is the
maximum value that is supported by all IP versions.
+
+Optional properties for AXI DMA:
+- xlnx,axistream-connected: Tells whether DMA is connected to AXI stream IP.
+- xlnx,irq-delay: Tells the interrupt delay timeout value. Valid range is from
+ 0-255. Setting this value to zero disables the delay timer interrupt.
+ 1 timeout interval = 125 * clock period of SG clock.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
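
Worked example for the new xlnx,irq-delay property above: the coalescing delay is xlnx,irq-delay * 125 * (SG clock period). Assuming, purely for illustration, a 100 MHz SG clock (10 ns period), one timeout interval is 125 * 10 ns = 1.25 us, so xlnx,irq-delay = <8> delays the interrupt by roughly 10 us, while 0 disables the delay timer interrupt.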
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 08fdd0e2ed1bf0..4ccae1a3b88427 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -474,25 +474,6 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into some Freescale chips.
-config MX3_IPU
- bool "MX3x Image Processing Unit support"
- depends on ARCH_MXC
- select DMA_ENGINE
- default y
- help
- If you plan to use the Image Processing unit in the i.MX3x, say
- Y here. If unsure, select Y.
-
-config MX3_IPU_IRQS
- int "Number of dynamically mapped interrupts for IPU"
- depends on MX3_IPU
- range 2 137
- default 4
- help
- Out of 137 interrupt sources on i.MX31 IPU only very few are used.
- To avoid bloating the irq_desc[] array we allocate a sufficient
- number of IRQ slots and map them dynamically to specific sources.
-
config NBPFAXI_DMA
tristate "Renesas Type-AXI NBPF DMA support"
select DMA_ENGINE
@@ -699,7 +680,7 @@ config XGENE_DMA
config XILINX_DMA
tristate "Xilinx AXI DMAS Engine"
- depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fd1ce2951028..83553a97a010e1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
-obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HISI_DMA) += hisi_dma.o
@@ -55,7 +57,6 @@ obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 4cf8da77bdd913..3af795635c5ce6 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -10,8 +10,9 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ee3a219e3a897c..b2876f67471fe1 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 064761289a7389..94ea35330eb5c1 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -35,7 +35,9 @@
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index eabbcfcaa7cba1..80096f94032dc6 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -14,9 +14,8 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 9c1a6e9a9c0308..adbd47bd6adfe4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 826b98284fa1f8..b7388ae62d7f1f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1147,69 +1147,27 @@ int dma_async_device_register(struct dma_device *device)
device->owner = device->dev->driver->owner;
- if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMSET");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERRUPT");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_CYCLIC");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERLEAVE");
- return -EIO;
- }
+#define CHECK_CAP(_name, _type) \
+{ \
+ if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
+ dev_err(device->dev, \
+ "Device claims capability %s, but op is not defined\n", \
+ __stringify(_type)); \
+ return -EIO; \
+ } \
+}
+ CHECK_CAP(dma_memcpy, DMA_MEMCPY);
+ CHECK_CAP(dma_xor, DMA_XOR);
+ CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
+ CHECK_CAP(dma_pq, DMA_PQ);
+ CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
+ CHECK_CAP(dma_memset, DMA_MEMSET);
+ CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
+ CHECK_CAP(dma_cyclic, DMA_CYCLIC);
+ CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
+
+#undef CHECK_CAP
if (!device->device_tx_status) {
dev_err(device->dev, "Device tx_status is not defined\n");
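
For reference, each CHECK_CAP() invocation above expands to essentially the open-coded check it replaces, with __stringify() producing the capability name; e.g. CHECK_CAP(dma_memcpy, DMA_MEMCPY) becomes roughly:

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}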
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 796b6caf0babe1..dd02f84e404d08 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index f9912c3dd4d7c0..4fb8508419dbd8 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -5,8 +5,10 @@
* Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
#include <linux/bitops.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/types.h>
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5338a94f1a69f0..5c4a448a12541a 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1320,11 +1320,9 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ep93xx_dma_engine *edma;
struct dma_device *dma_dev;
- size_t edma_size;
int ret, i;
- edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
- edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
if (!edma)
return -ENOMEM;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a06a1575a2a502..a0f5741abcc479 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
@@ -40,14 +42,73 @@
#define EDMA64_ERRH 0x28
#define EDMA64_ERRL 0x2c
-#define EDMA_TCD 0x1000
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ return;
+ }
+
+ if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+}
+
+static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val, flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+ val = edma_readl_chreg(fsl_chan, ch_sbr);
+ /* Remote/local swapped wrongly on iMX8 QM Audio edma */
+ if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+ if (!fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ } else {
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ }
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+
+ edma_writel_chreg(fsl_chan, val, ch_sbr);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val |= EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_enable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
@@ -59,12 +120,29 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
}
}
+static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val = edma_readl_chreg(fsl_chan, ch_csr);
+ u32 flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, 0, ch_mux);
+
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_disable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
@@ -75,7 +153,6 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
@@ -112,36 +189,33 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
+ if (!dmamux_nr)
+ return;
+
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- if (fsl_chan->edma->drvdata->mux_swap)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
ch_off += endian_diff[ch_off % 4];
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- if (fsl_chan->edma->drvdata->version == v3)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
else
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
+ u32 val;
+
+ if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ val = ffs(addr_width) - 1;
+ return val | (val << 8);
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -155,7 +229,6 @@ void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
int fsl_edma_terminate_all(struct dma_chan *chan)
{
@@ -170,9 +243,12 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
+ pm_runtime_allow(fsl_chan->pd_dev);
+
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
@@ -188,7 +264,6 @@ int fsl_edma_pause(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
@@ -204,7 +279,6 @@ int fsl_edma_resume(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_resume);
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
@@ -265,36 +339,41 @@ int fsl_edma_slave_config(struct dma_chan *chan,
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
+ u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+ len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ }
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
else
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+
+ size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
@@ -340,14 +419,10 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
u16 csr = 0;
/*
@@ -356,23 +431,22 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
* big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/
- edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
- edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
- edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
+ edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
+ edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
- edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
- edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
+ edma_write_tcdreg(fsl_chan, tcd->attr, attr);
+ edma_write_tcdreg(fsl_chan, tcd->soff, soff);
- edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
- edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
+ edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
+ edma_write_tcdreg(fsl_chan, tcd->slast, slast);
- edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
- edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
- edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+ edma_write_tcdreg(fsl_chan, tcd->citer, citer);
+ edma_write_tcdreg(fsl_chan, tcd->biter, biter);
+ edma_write_tcdreg(fsl_chan, tcd->doff, doff);
- edma_writel(edma, (s32)tcd->dlast_sga,
- &regs->tcd[ch].dlast_sga);
+ edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
if (fsl_chan->is_sw) {
csr = le16_to_cpu(tcd->csr);
@@ -380,16 +454,19 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
tcd->csr = cpu_to_le16(csr);
}
- edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
+ struct dma_slave_config *cfg = &fsl_chan->cfg;
u16 csr = 0;
+ u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -404,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
tcd->soff = cpu_to_le16(soff);
+ if (fsl_chan->is_multi_fifo) {
+ /* set mloff to support multiple fifo */
+ burst = cfg->direction == DMA_DEV_TO_MEM ?
+ cfg->src_addr_width : cfg->dst_addr_width;
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+ /* enable DMLOE/SMLOE */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
+ } else {
+ nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
+ }
+ }
+
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
@@ -422,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
+ if (fsl_chan->is_rxchan)
+ csr |= EDMA_TCD_CSR_ACTIVE;
+
+ if (fsl_chan->is_sw)
+ csr |= EDMA_TCD_CSR_START;
+
tcd->csr = cpu_to_le16(csr);
}
@@ -461,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
+ bool major_int = true;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
@@ -504,23 +603,28 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
src_addr = dma_buf_next;
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
- doff = 0;
- } else {
+ doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
- soff = 0;
+ soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = doff = 0;
+ major_int = false;
}
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
+ iter, doff, last_sg, major_int, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -564,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
- } else {
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = 0;
+ doff = 0;
}
+ /*
+ * Choose the suitable burst length if sg_dma_len is not
+ * multiple of burst length so that the whole transfer length is
+ * multiple of minor loop(burst length).
+ */
+ if (sg_dma_len(sg) % nbytes) {
+ u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
+ u32 burst = (direction == DMA_DEV_TO_MEM) ?
+ fsl_chan->cfg.src_maxburst :
+ fsl_chan->cfg.dst_maxburst;
+ int j;
+
+ for (j = burst; j > 1; j--) {
+ if (!(sg_dma_len(sg) % (j * width))) {
+ nbytes = j * width;
+ break;
+ }
+ }
+ /* Set burst size as 1 if there's no suitable one */
+ if (j == 1)
+ nbytes = width;
+ }
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
@@ -589,7 +721,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
dma_addr_t dma_dst, dma_addr_t dma_src,
@@ -606,13 +737,12 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
fsl_chan->is_sw = true;
/* To match with copy_align and max_seg_size so 1 tcd is enough */
- fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+ fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
32, len, 0, 1, 1, 32, 0, true, true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
@@ -629,7 +759,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
@@ -649,7 +778,6 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -660,7 +788,6 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
32, 0);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
@@ -683,7 +810,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
@@ -695,12 +821,10 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
tasklet_kill(&chan->vchan.task);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
- * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
- * register offsets are different compared to ColdFire mcf5441x 64 channels
- * edma (here called "v2").
+ * On the 32 channels Vybrid/mpc577x edma version, register offsets are
+ * different compared to ColdFire mcf5441x 64 channels edma.
*
* This function sets up register offsets as per proper declared version
* so must be called in xxx_edma_probe() just after setting the
@@ -708,41 +832,30 @@ EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
*/
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
+ bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
+
edma->regs.cr = edma->membase + EDMA_CR;
edma->regs.es = edma->membase + EDMA_ES;
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
- edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SERQ : EDMA_SERQ);
- edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERQ : EDMA_CERQ);
- edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SEEI : EDMA_SEEI);
- edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CEEI : EDMA_CEEI);
- edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CINT : EDMA_CINT);
- edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERR : EDMA_CERR);
- edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SSRT : EDMA_SSRT);
- edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CDNE : EDMA_CDNE);
- edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_INTL : EDMA_INTR);
- edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_ERRL : EDMA_ERR);
-
- if (edma->drvdata->version == v2) {
+ edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
+ edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
+ edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
+ edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
+ edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
+ edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
+ edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
+ edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
+ edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
+ edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
+
+ if (is64) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
edma->regs.inth = edma->membase + EDMA64_INTH;
}
-
- edma->regs.tcd = edma->membase + EDMA_TCD;
}
-EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 004ec4a6bc86de..3cc0cc8fc2d05d 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -29,16 +29,6 @@
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_ATTR_DSIZE_8BIT 0
-#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
-#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
-#define EDMA_TCD_ATTR_SSIZE_8BIT 0
-#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
@@ -52,16 +42,32 @@
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
+#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
+#define EDMA_V3_TCD_NBYTES_MLOFF(x) (x << 10)
+#define EDMA_V3_TCD_NBYTES_DMLOE (1 << 30)
+#define EDMA_V3_TCD_NBYTES_SMLOE (1 << 31)
+
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
#define DMAMUX_NR 2
+#define EDMA_TCD 0x1000
+
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define EDMA_V3_CH_SBR_RD BIT(22)
+#define EDMA_V3_CH_SBR_WR BIT(21)
+#define EDMA_V3_CH_CSR_ERQ BIT(0)
+#define EDMA_V3_CH_CSR_EARQ BIT(1)
+#define EDMA_V3_CH_CSR_EEI BIT(2)
+#define EDMA_V3_CH_CSR_DONE BIT(30)
+#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -81,6 +87,18 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
+struct fsl_edma3_ch_reg {
+ __le32 ch_csr;
+ __le32 ch_es;
+ __le32 ch_int;
+ __le32 ch_sbr;
+ __le32 ch_pri;
+ __le32 ch_mux;
+ __le32 ch_mattr; /* edma4, reserved for edma3 */
+ __le32 ch_reserved;
+ struct fsl_edma_hw_tcd tcd;
+} __packed;
+
/*
* These are iomem pointers, for both v32 and v64.
*/
@@ -103,7 +121,6 @@ struct edma_regs {
void __iomem *intl;
void __iomem *errh;
void __iomem *errl;
- struct fsl_edma_hw_tcd __iomem *tcd;
};
struct fsl_edma_sw_tcd {
@@ -126,7 +143,20 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
- char chan_name[16];
+ char chan_name[32];
+ struct fsl_edma_hw_tcd __iomem *tcd;
+ u32 real_count;
+ struct work_struct issue_worker;
+ struct platform_device *pdev;
+ struct device *pd_dev;
+ u32 srcid;
+ struct clk *clk;
+ int priority;
+ int hw_chanid;
+ int txirq;
+ bool is_rxchan;
+ bool is_remote;
+ bool is_multi_fifo;
};
struct fsl_edma_desc {
@@ -138,17 +168,32 @@ struct fsl_edma_desc {
struct fsl_edma_sw_tcd tcd[];
};
-enum edma_version {
- v1, /* 32ch, Vybrid, mpc57x, etc */
- v2, /* 64ch Coldfire */
- v3, /* 32ch, i.mx7ulp */
-};
+#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)
+#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
+#define FSL_EDMA_DRV_CONFIG32 BIT(2)
+#define FSL_EDMA_DRV_WRAP_IO BIT(3)
+#define FSL_EDMA_DRV_EDMA64 BIT(4)
+#define FSL_EDMA_DRV_HAS_PD BIT(5)
+#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
+#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
+/* imx8 QM audio edma remote local swapped */
+#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
+/* control and status register is in tcd address space, edma3 reg layout */
+#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
+#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
+#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
+#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
+
+#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
+ FSL_EDMA_DRV_BUS_8BYTE | \
+ FSL_EDMA_DRV_DEV_TO_DEV | \
+ FSL_EDMA_DRV_ALIGN_64BYTE)
struct fsl_edma_drvdata {
- enum edma_version version;
- u32 dmamuxs;
- bool has_dmaclk;
- bool mux_swap;
+ u32 dmamuxs; /* only used before v3 */
+ u32 chreg_off;
+ u32 chreg_space_sz;
+ u32 flags;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -159,6 +204,7 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
+ struct clk *chclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
@@ -166,9 +212,28 @@ struct fsl_edma_engine {
int errirq;
bool big_endian;
struct edma_regs regs;
+ u64 chan_masked;
struct fsl_edma_chan chans[];
};
+#define edma_read_tcdreg(chan, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_readl(chan->edma, &chan->tcd->__name) : \
+ edma_readw(chan->edma, &chan->tcd->__name))
+
+#define edma_write_tcdreg(chan, val, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
+ edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+
+#define edma_readl_chreg(chan, __name) \
+ edma_readl(chan->edma, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define edma_writel_chreg(chan, val, __name) \
+ edma_writel(chan->edma, val, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
@@ -183,6 +248,14 @@ static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
return ioread32(addr);
}
+static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread16be(addr);
+ else
+ return ioread16(addr);
+}
+
static inline void edma_writeb(struct fsl_edma_engine *edma,
u8 val, void __iomem *addr)
{
@@ -217,11 +290,23 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
+static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
+{
+ return fsl_chan->edma->drvdata->flags;
+}
+
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
}
+static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ fsl_chan->status = DMA_ERROR;
+ fsl_chan->idle = true;
+}
+
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable);
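
A quick standalone illustration (userspace C, hypothetical names, not the driver) of the sizeof()-based dispatch the new edma_read_tcdreg()/edma_write_tcdreg() macros above rely on: because each TCD field is typed __le16 or __le32, sizeof(chan->tcd->field) selects the 16-bit or 32-bit accessor at compile time, so one macro covers every TCD register:

#include <stdint.h>
#include <stdio.h>

struct tcd {
	uint32_t saddr;	/* 32-bit TCD field */
	uint16_t soff;	/* 16-bit TCD field */
};

/* Picks the "writel" or "writew" path from the field's type, like edma_write_tcdreg(). */
#define write_reg(t, val, name)						\
	(sizeof((t)->name) == sizeof(uint32_t) ?			\
		printf("writel 0x%08x -> " #name "\n", (unsigned int)(val)) :	\
		printf("writew 0x%04x -> " #name "\n", (unsigned int)(val) & 0xffff))

int main(void)
{
	struct tcd t;

	write_reg(&t, 0x12345678, saddr);	/* 32-bit path */
	write_reg(&t, 0x9abc, soff);		/* 16-bit path */
	return 0;
}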
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma-main.c
index e40769666e3933..63d48d046f046f 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -18,9 +18,15 @@
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
+#define ARGS_RX BIT(0)
+#define ARGS_REMOTE BIT(1)
+#define ARGS_MULTI_FIFO BIT(2)
+
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -33,7 +39,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
struct edma_regs *regs = &fsl_edma->regs;
- struct fsl_edma_chan *fsl_chan;
intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
@@ -42,33 +47,25 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
+ fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
+ }
+ }
+ return IRQ_HANDLED;
+}
- fsl_chan = &fsl_edma->chans[ch];
-
- spin_lock(&fsl_chan->vchan.lock);
+static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+ unsigned int intr;
- if (!fsl_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&fsl_chan->vchan.lock);
- continue;
- }
+ intr = edma_readl_chreg(fsl_chan, ch_int);
+ if (!intr)
+ return IRQ_HANDLED;
- if (!fsl_chan->edesc->iscyclic) {
- list_del(&fsl_chan->edesc->vdesc.node);
- vchan_cookie_complete(&fsl_chan->edesc->vdesc);
- fsl_chan->edesc = NULL;
- fsl_chan->status = DMA_COMPLETE;
- fsl_chan->idle = true;
- } else {
- vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
- }
+ edma_writel_chreg(fsl_chan, 1, ch_int);
- if (!fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
+ fsl_edma_tx_chan_handler(fsl_chan);
- spin_unlock(&fsl_chan->vchan.lock);
- }
- }
return IRQ_HANDLED;
}
@@ -86,8 +83,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
- fsl_edma->chans[ch].status = DMA_ERROR;
- fsl_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
}
}
return IRQ_HANDLED;
@@ -134,11 +130,58 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
+static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
+ bool b_chmux;
+ int i;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
+ device_node) {
+
+ if (chan->client_count)
+ continue;
+
+ fsl_chan = to_fsl_edma_chan(chan);
+ i = fsl_chan - fsl_edma->chans;
+
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+
+ if (!b_chmux && i == dma_spec->args[0]) {
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ } else if (b_chmux && !fsl_chan->srcid) {
+ /* if controller support channel mux, choose a free channel */
+ fsl_chan->srcid = dma_spec->args[0];
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
+
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int ret;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
@@ -173,6 +216,37 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ /* request channel irq */
+ fsl_chan->txirq = platform_get_irq(pdev, i);
+ if (fsl_chan->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
+ fsl_edma3_tx_handler, IRQF_SHARED,
+ fsl_chan->chan_name, fsl_chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -180,6 +254,8 @@ fsl_edma2_irq_init(struct platform_device *pdev,
int i, ret, irq;
int count;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
count = platform_irq_count(pdev);
dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
if (count <= 2) {
@@ -197,8 +273,6 @@ fsl_edma2_irq_init(struct platform_device *pdev,
if (irq < 0)
return -ENXIO;
- sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
-
/* The last IRQ is for eDMA err */
if (i == count - 1)
ret = devm_request_irq(&pdev->dev, irq,
@@ -236,33 +310,110 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
}
static struct fsl_edma_drvdata vf610_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
+ .flags = FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata ls1028a_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
- .mux_swap = true,
+ .flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata imx7ulp_data = {
- .version = v3,
.dmamuxs = 1,
- .has_dmaclk = true,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
.setup_irq = fsl_edma2_irq_init,
};
+static struct fsl_edma_drvdata imx8qm_data = {
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx8qm_audio_data = {
+ .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data3 = {
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data4 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+ { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
+ { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
+ { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ struct device_link *link;
+ struct device *pd_chan;
+ struct device *dev;
+ int i;
+
+ dev = &pdev->dev;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ fsl_chan = &fsl_edma->chans[i];
+
+ pd_chan = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR_OR_NULL(pd_chan)) {
+ dev_err(dev, "Failed attach pd %d\n", i);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (IS_ERR(link)) {
+ dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
+ PTR_ERR(link));
+ return -EINVAL;
+ }
+
+ fsl_chan->pd_dev = pd_chan;
+
+ pm_runtime_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
+ pm_runtime_set_active(fsl_chan->pd_dev);
+ }
+
+ return 0;
+}
+
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -270,9 +421,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
- struct fsl_edma_chan *fsl_chan;
+ u32 chan_mask[2] = {0, 0};
struct edma_regs *regs;
- int len, chans;
+ int chans;
int ret, i;
if (of_id)
@@ -288,8 +439,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
- len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
- fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
+ GFP_KERNEL);
if (!fsl_edma)
return -ENOMEM;
@@ -301,26 +452,42 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
- fsl_edma_setup_regs(fsl_edma);
- regs = &fsl_edma->regs;
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+ }
- if (drvdata->has_dmaclk) {
- fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
+ fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
+ }
- ret = clk_prepare_enable(fsl_edma->dmaclk);
- if (ret) {
- dev_err(&pdev->dev, "DMA clk block failed.\n");
- return ret;
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
+ if (IS_ERR(fsl_edma->chclk)) {
+ dev_err(&pdev->dev, "Missing MP block clock.\n");
+ return PTR_ERR(fsl_edma->chclk);
}
}
+ ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
+
+ if (ret > 0) {
+ fsl_edma->chan_masked = chan_mask[1];
+ fsl_edma->chan_masked <<= 32;
+ fsl_edma->chan_masked |= chan_mask[0];
+ }
+
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
+		/* eDMAv3 mux registers move to the TCD area if ch_mux exists */
+ if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
+ break;
+
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -330,26 +497,32 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
sprintf(clkname, "dmamux%d", i);
- fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+ fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
/* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
-
- ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret)
- /* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
-
}
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
+ ret = fsl_edma3_attach_pd(pdev, fsl_edma);
+ if (ret)
+ return ret;
+ }
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+ int len;
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
+ dev_name(&pdev->dev), i);
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
@@ -357,13 +530,19 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+
+ len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
+ offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
+ fsl_chan->tcd = fsl_edma->membase
+ + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+
+ fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
if (ret)
return ret;
@@ -391,33 +570,47 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+
+ if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
+ fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ }
+
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
+ fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
+
+ fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
+ DMAENGINE_ALIGN_64_BYTES :
+ DMAENGINE_ALIGN_32_BYTES;
- fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
platform_set_drvdata(pdev, fsl_edma);
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
- ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ ret = of_dma_controller_register(np,
+ drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
@@ -470,7 +663,7 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index eddb2688f23406..a8cc8a4bc6102c 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -13,10 +13,10 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include "virt-dma.h"
#include "fsldma.h"
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index fdf3500d96a9e1..0b9ca93ce3dca7 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -60,9 +60,10 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f8459cc5315dfa..ddcf736d283d87 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -28,9 +28,10 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 6453b5b35bfece..22d6f4e455b797 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -769,8 +769,6 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- memset(addr, 0, size);
-
spin_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 502be9db63f403..e269ca1f486255 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -660,8 +660,6 @@ int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
-int idxd_register_driver(void);
-void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
@@ -673,8 +671,6 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
-int idxd_register_idxd_drv(void);
-void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
@@ -719,7 +715,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index d73004f47cf4b4..fdda6d60426295 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -245,12 +245,11 @@ static void perfmon_pmu_event_update(struct perf_event *event)
int shift = 64 - idxd->idxd_pmu->counter_width;
struct hw_perf_event *hwc = &event->hw;
+ prev_raw_count = local64_read(&hwc->prev_count);
do {
- prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = perfmon_pmu_read_counter(event);
- } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count);
-
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
n = (new_raw_count << shift);
p = (prev_raw_count << shift);
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 63f6966c51aa17..7caba90d85b31e 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1088,8 +1088,8 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_ats_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &ats_dis);
if (rc < 0)
@@ -1124,8 +1124,8 @@ static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_prs_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &prs_dis);
if (rc < 0)
@@ -1281,12 +1281,9 @@ static struct attribute *idxd_wq_attributes[] = {
NULL,
};
-static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_op_config.attr &&
- !idxd->hw.wq_cap.op_config;
-}
+/* A WQ attr is invisible if the feature is not supported in WQCAP. */
+#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
+ ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
struct idxd_device *idxd)
@@ -1296,13 +1293,6 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}
-static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_prs_disable.attr &&
- !idxd->hw.wq_cap.wq_prs_support;
-}
-
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1310,13 +1300,16 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
- if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
return 0;
if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
return 0;
- if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
+ return 0;
+
+ if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
return 0;
return attr->mode;
@@ -1473,7 +1466,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
+ return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index ad084552640ffd..9be0d3226e1992 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f040751690af1f..114f254b9f5057 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -21,7 +21,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <asm/irq.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7a912f90c2a9a9..51012bd399002d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 289c59ed74b977..17f6b636711360 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -51,7 +51,7 @@
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
- return (pci->bus->number << 8) | pci->devfn;
+ return pci_dev_id(pci);
}
static int dca_enabled_in_bios(struct pci_dev *pdev)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 35e06b3826034a..a180171087a872 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -74,6 +74,7 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+ int chancnt;
/* shadow version for CB3.3 chan reset errata workaround */
u64 msixtba0;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index c4602bfc9c7491..9c364e92cb828d 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -420,7 +420,7 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
msix:
/* The number of MSI-X vectors should equal the number of channels */
- msixcnt = ioat_dma->dma_dev.chancnt;
+ msixcnt = ioat_dma->chancnt;
for (i = 0; i < msixcnt; i++)
ioat_dma->msix_entries[i].entry = i;
@@ -511,7 +511,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->dev = &pdev->dev;
- if (!dma->chancnt) {
+ if (!ioat_dma->chancnt) {
dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
@@ -567,15 +567,16 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
struct device *dev = &ioat_dma->pdev->dev;
struct dma_device *dma = &ioat_dma->dma_dev;
u8 xfercap_log;
+ int chancnt;
int i;
INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+ chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+ chancnt &= 0x1f; /* bits [4:0] valid */
+ if (chancnt > ARRAY_SIZE(ioat_dma->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
- dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+ chancnt, ARRAY_SIZE(ioat_dma->idx));
+ chancnt = ARRAY_SIZE(ioat_dma->idx);
}
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
@@ -583,7 +584,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
- for (i = 0; i < dma->chancnt; i++) {
+ for (i = 0; i < chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -596,7 +597,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
break;
}
}
- dma->chancnt = i;
+ ioat_dma->chancnt = i;
}
/**
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
deleted file mode 100644
index c79ff116daf610..00000000000000
--- a/drivers/dma/ipu/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
deleted file mode 100644
index d799b99c18bdaf..00000000000000
--- a/drivers/dma/ipu/ipu_idmac.c
+++ /dev/null
@@ -1,1801 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "../dmaengine.h"
-#include "ipu_intern.h"
-
-#define FS_VF_IN_VALID 0x00000002
-#define FS_ENC_IN_VALID 0x00000001
-
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop);
-
-/*
- * There can be only one, we could allocate it dynamically, but then we'd have
- * to add an extra parameter to some functions, and use something as ugly as
- * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
- * in the ISR
- */
-static struct ipu ipu_data;
-
-#define to_ipu(id) container_of(id, struct ipu, idmac)
-
-static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ic + reg);
-}
-
-#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
-
-static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ic + reg);
-}
-
-#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
-
-static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-/*****************************************************************************
- * IPU / IC common functions
- */
-static void dump_idmac_reg(struct ipu *ipu)
-{
- dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
- "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
- idmac_read_icreg(ipu, IDMAC_CONF),
- idmac_read_icreg(ipu, IC_CONF),
- idmac_read_icreg(ipu, IDMAC_CHA_EN),
- idmac_read_icreg(ipu, IDMAC_CHA_PRI),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
- dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
- "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
- idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
-}
-
-static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_GENERIC: /* generic data */
- case IPU_PIX_FMT_RGB332:
- case IPU_PIX_FMT_YUV420P:
- case IPU_PIX_FMT_YUV422P:
- default:
- return 1;
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_YUYV:
- case IPU_PIX_FMT_UYVY:
- return 2;
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- return 3;
- case IPU_PIX_FMT_GENERIC_32: /* generic data */
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- case IPU_PIX_FMT_ABGR32:
- return 4;
- }
-}
-
-/* Enable direct write to memory by the Camera Sensor Interface */
-static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-/* Called under spin_lock_irqsave(&ipu_data.lock) */
-static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t stat = TASK_STAT_IDLE;
- uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
-
- switch (channel) {
- case IDMAC_IC_7:
- stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
- TSTAT_CSI2MEM_OFFSET;
- break;
- case IDMAC_IC_0:
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
- return stat;
-}
-
-struct chan_param_mem_planar {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 res1:2;
- u32 nsb:1;
- u32 lnpb:6;
- u32 ubo_l:11;
-
- u32 ubo_h:15;
- u32 vbo_l:17;
-
- u32 vbo_h:9;
- u32 res2:3;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 res6:30;
-} __attribute__ ((packed));
-
-struct chan_param_mem_interleaved {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 sce:1;
- u32 res1:1;
- u32 nsb:1;
- u32 lnpb:6;
- u32 sx:10;
- u32 sy_l:1;
-
- u32 sy_h:9;
- u32 ns:10;
- u32 sm:10;
- u32 sdx_l:3;
-
- u32 sdx_h:2;
- u32 sdy:5;
- u32 sdrx:1;
- u32 sdry:1;
- u32 sdr1:1;
- u32 res2:2;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 scc:1;
- u32 ofs0:5;
- u32 ofs1:5;
- u32 ofs2:5;
- u32 ofs3:5;
- u32 wid0:3;
- u32 wid1:3;
- u32 wid2:3;
-
- u32 wid3:3;
- u32 dec_sel:1;
- u32 res6:28;
-} __attribute__ ((packed));
-
-union chan_param_mem {
- struct chan_param_mem_planar pp;
- struct chan_param_mem_interleaved ip;
-};
-
-static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
- u32 u_offset, u32 v_offset)
-{
- params->pp.ubo_l = u_offset & 0x7ff;
- params->pp.ubo_h = u_offset >> 11;
- params->pp.vbo_l = v_offset & 0x1ffff;
- params->pp.vbo_h = v_offset >> 17;
-}
-
-static void ipu_ch_param_set_size(union chan_param_mem *params,
- uint32_t pixel_fmt, uint16_t width,
- uint16_t height, uint16_t stride)
-{
- u32 u_offset;
- u32 v_offset;
-
- params->pp.fw = width - 1;
- params->pp.fh_l = height - 1;
- params->pp.fh_h = (height - 1) >> 8;
- params->pp.sl = stride - 1;
-
- switch (pixel_fmt) {
- case IPU_PIX_FMT_GENERIC:
- /*Represents 8-bit Generic data */
- params->pp.bpp = 3;
- params->pp.pfs = 7;
- params->pp.npb = 31;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_GENERIC_32:
- /*Represents 32-bit Generic data */
- params->pp.bpp = 0;
- params->pp.pfs = 7;
- params->pp.npb = 7;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_RGB565:
- params->ip.bpp = 2;
- params->ip.pfs = 4;
- params->ip.npb = 15;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 5; /* Green bit offset */
- params->ip.ofs2 = 11; /* Blue bit offset */
- params->ip.ofs3 = 16; /* Alpha bit offset */
- params->ip.wid0 = 4; /* Red bit width - 1 */
- params->ip.wid1 = 5; /* Green bit width - 1 */
- params->ip.wid2 = 4; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGR24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 16; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_RGB24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 16; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 0; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGRA32:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_RGBA32:
- case IPU_PIX_FMT_RGB32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 24; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 8; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_UYVY:
- params->ip.bpp = 2;
- params->ip.pfs = 6;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- break;
- case IPU_PIX_FMT_YUV420P2:
- case IPU_PIX_FMT_YUV420P:
- params->ip.bpp = 3;
- params->ip.pfs = 3;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 4;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YVU422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- v_offset = stride * height;
- u_offset = v_offset + v_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YUV422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- default:
- dev_err(ipu_data.dev,
- "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
- break;
- }
-
- params->pp.nsb = 1;
-}
-
-static void ipu_ch_param_set_buffer(union chan_param_mem *params,
- dma_addr_t buf0, dma_addr_t buf1)
-{
- params->pp.eba0 = buf0;
- params->pp.eba1 = buf1;
-}
-
-static void ipu_ch_param_set_rotation(union chan_param_mem *params,
- enum ipu_rotate_mode rotate)
-{
- params->pp.bam = rotate;
-}
-
-static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
- uint32_t num_words)
-{
- for (; num_words > 0; num_words--) {
- dev_dbg(ipu_data.dev,
- "write param mem - addr = 0x%08X, data = 0x%08X\n",
- addr, *data);
- idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
- addr++;
- if ((addr & 0x7) == 5) {
- addr &= ~0x7; /* set to word 0 */
- addr += 8; /* increment to next row */
- }
- }
-}
-
-static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
- uint32_t *resize_coeff,
- uint32_t *downsize_coeff)
-{
- uint32_t temp_size;
- uint32_t temp_downsize;
-
- *resize_coeff = 1 << 13;
- *downsize_coeff = 1 << 13;
-
- /* Cannot downsize more than 8:1 */
- if (out_size << 3 < in_size)
- return -EINVAL;
-
- /* compute downsizing coefficient */
- temp_downsize = 0;
- temp_size = in_size;
- while (temp_size >= out_size * 2 && temp_downsize < 2) {
- temp_size >>= 1;
- temp_downsize++;
- }
- *downsize_coeff = temp_downsize;
-
- /*
- * compute resizing coefficient using the following formula:
- * resize_coeff = M*(SI -1)/(SO - 1)
- * where M = 2^13, SI - input size, SO - output size
- */
- *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
- if (*resize_coeff >= 16384L) {
- dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
- *resize_coeff = 0x3FFF;
- }
-
- dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
- "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
- *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
- ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
-
- return 0;
-}
-
-static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- return IPU_COLORSPACE_RGB;
- default:
- return IPU_COLORSPACE_YCBCR;
- }
-}
-
-static int ipu_ic_init_prpenc(struct ipu *ipu,
- union ipu_channel_param *params, bool src_is_csi)
-{
- uint32_t reg, ic_conf;
- uint32_t downsize_coeff, resize_coeff;
- enum ipu_color_space in_fmt, out_fmt;
-
- /* Setup vertical resizing */
- calc_resize_coeffs(params->video.in_height,
- params->video.out_height,
- &resize_coeff, &downsize_coeff);
- reg = (downsize_coeff << 30) | (resize_coeff << 16);
-
- /* Setup horizontal resizing */
- calc_resize_coeffs(params->video.in_width,
- params->video.out_width,
- &resize_coeff, &downsize_coeff);
- reg |= (downsize_coeff << 14) | resize_coeff;
-
- /* Setup color space conversion */
- in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
- out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
-
- /*
- * Colourspace conversion unsupported yet - see _init_csc() in
- * Freescale sources
- */
- if (in_fmt != out_fmt) {
- dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
- return -EOPNOTSUPP;
- }
-
- idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
-
- ic_conf = idmac_read_icreg(ipu, IC_CONF);
-
- if (src_is_csi)
- ic_conf &= ~IC_CONF_RWS_EN;
- else
- ic_conf |= IC_CONF_RWS_EN;
-
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-
- return 0;
-}
-
-static uint32_t dma_param_addr(uint32_t dma_ch)
-{
- /* Channel Parameter Memory */
- return 0x10000 | (dma_ch << 4);
-}
-
-static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
- bool prio)
-{
- u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
-
- if (prio)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
-
- dump_idmac_reg(ipu);
-}
-
-static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
-{
- uint32_t mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- case IDMAC_IC_7:
- mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
- break;
- default:
- mask = 0;
- break;
- }
-
- return mask;
-}
-
-/**
- * ipu_enable_channel() - enable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: IDMAC channel.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- struct ipu *ipu = to_ipu(idmac);
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Reset to buffer 0 */
- idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
- ichan->active_buffer = 0;
- ichan->status = IPU_CHANNEL_ENABLED;
-
- switch (channel) {
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- case IDMAC_IC_7:
- ipu_channel_set_priority(ipu, channel, true);
- break;
- default:
- break;
- }
-
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
-
- idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
-
- ipu_ic_enable_task(ipu, channel);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
- return 0;
-}
-
-/**
- * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @ichan: IDMAC channel.
- * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
- * @width: width of buffer in pixels.
- * @height: height of buffer in pixels.
- * @stride: stride length of buffer in pixels.
- * @rot_mode: rotation mode of buffer. A rotation setting other than
- * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
- * rotation channels.
- * @phyaddr_0: buffer 0 physical address.
- * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
- * NULL enables double buffering mode.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_init_channel_buffer(struct idmac_channel *ichan,
- enum pixel_fmt pixel_fmt,
- uint16_t width, uint16_t height,
- uint32_t stride,
- enum ipu_rotate_mode rot_mode,
- dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
- struct ipu *ipu = to_ipu(idmac);
- union chan_param_mem params = {};
- unsigned long flags;
- uint32_t reg;
- uint32_t stride_bytes;
-
- stride_bytes = stride * bytes_per_pixel(pixel_fmt);
-
- if (stride_bytes % 4) {
- dev_err(ipu->dev,
- "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
- stride, stride_bytes);
- return -EINVAL;
- }
-
- /* IC channel's stride must be a multiple of 8 pixels */
- if ((channel <= IDMAC_IC_13) && (stride % 8)) {
- dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
- return -EINVAL;
- }
-
- /* Build parameter memory data for DMA channel */
- ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
- ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
- ipu_ch_param_set_rotation(&params, rot_mode);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
-
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
-
- if (phyaddr_1)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
-
- ichan->status = IPU_CHANNEL_READY;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-/**
- * ipu_select_buffer() - mark a channel's buffer as ready.
- * @channel: channel ID.
- * @buffer_n: buffer number to mark ready.
- */
-static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
-{
- /* No locking - this is a write-one-to-set register, cleared by IPU */
- if (buffer_n == 0)
- /* Mark buffer 0 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
- else
- /* Mark buffer 1 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
-}
-
-/**
- * ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @ichan: IDMAC channel.
- * @buffer_n: buffer number to update.
- * 0 or 1 are the only valid values.
- * @phyaddr: buffer physical address.
- */
-/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static void ipu_update_channel_buffer(struct idmac_channel *ichan,
- int buffer_n, dma_addr_t phyaddr)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu_data.lock, flags);
-
- if (buffer_n == 0) {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0008UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- } else {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* Check if double-buffering is already enabled */
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
-
- if (!(reg & (1UL << channel)))
- idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
- IPU_CHA_DB_MODE_SEL);
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0009UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- }
-
- spin_unlock_irqrestore(&ipu_data.lock, flags);
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_buffer(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
-{
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct device *dev = &ichan->dma_chan.dev->device;
-
- if (async_tx_test_ack(&desc->txd))
- return -EINTR;
-
- /*
- * On first invocation this shouldn't be necessary, the call to
- * ipu_init_channel_buffer() above will set addresses for us, so we
- * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
- * doing it again shouldn't hurt either.
- */
- ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
-
- ipu_select_buffer(chan_id, buf_idx);
- dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
- sg, chan_id, buf_idx);
-
- return 0;
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc)
-{
- struct scatterlist *sg;
- int i, ret = 0;
-
- for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
- if (!ichan->sg[i]) {
- ichan->sg[i] = sg;
-
- ret = ipu_submit_buffer(ichan, desc, sg, i);
- if (ret < 0)
- return ret;
-
- sg = sg_next(sg);
- }
- }
-
- return ret;
-}
-
-static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct idmac_tx_desc *desc = to_tx_desc(tx);
- struct idmac_channel *ichan = to_idmac_chan(tx->chan);
- struct idmac *idmac = to_idmac(tx->chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct device *dev = &ichan->dma_chan.dev->device;
- dma_cookie_t cookie;
- unsigned long flags;
- int ret;
-
- /* Sanity check */
- if (!list_empty(&desc->list)) {
- /* The descriptor doesn't belong to client */
- dev_err(dev, "Descriptor %p not prepared!\n", tx);
- return -EBUSY;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- async_tx_clear_ack(tx);
-
- if (ichan->status < IPU_CHANNEL_READY) {
- struct idmac_video_param *video = &ichan->params.video;
- /*
- * Initial buffer assignment - the first two sg-entries from
- * the descriptor will end up in the IDMAC buffers
- */
- dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
- sg_dma_address(&desc->sg[1]);
-
- WARN_ON(ichan->sg[0] || ichan->sg[1]);
-
- cookie = ipu_init_channel_buffer(ichan,
- video->out_pixel_fmt,
- video->out_width,
- video->out_height,
- video->out_stride,
- IPU_ROTATE_NONE,
- sg_dma_address(&desc->sg[0]),
- dma_1);
- if (cookie < 0)
- goto out;
- }
-
- dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
-
- cookie = dma_cookie_assign(tx);
-
- /* ipu->lock can be taken under ichan->lock, but not v.v. */
- spin_lock_irqsave(&ichan->lock, flags);
-
- list_add_tail(&desc->list, &ichan->queue);
- /* submit_buffers() atomically verifies and fills empty sg slots */
- ret = ipu_submit_channel_buffers(ichan, desc);
-
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
-
- if (ichan->status < IPU_CHANNEL_ENABLED) {
- ret = ipu_enable_channel(idmac, ichan);
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
- }
-
- dump_idmac_reg(ipu);
-
-dequeue:
- if (cookie < 0) {
- spin_lock_irqsave(&ichan->lock, flags);
- list_del_init(&desc->list);
- spin_unlock_irqrestore(&ichan->lock, flags);
- tx->cookie = cookie;
- ichan->dma_chan.cookie = cookie;
- }
-
-out:
- mutex_unlock(&ichan->chan_mutex);
-
- return cookie;
-}
-
-/* Called with ichan->chan_mutex held */
-static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
-{
- struct idmac_tx_desc *desc =
- vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
-
- if (!desc)
- return -ENOMEM;
-
- /* No interrupts, just disable the tasklet for a moment */
- tasklet_disable(&to_ipu(idmac)->tasklet);
-
- ichan->n_tx_desc = n;
- ichan->desc = desc;
- INIT_LIST_HEAD(&ichan->queue);
- INIT_LIST_HEAD(&ichan->free_list);
-
- while (n--) {
- struct dma_async_tx_descriptor *txd = &desc->txd;
-
- memset(txd, 0, sizeof(*txd));
- dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
- txd->tx_submit = idmac_tx_submit;
-
- list_add(&desc->list, &ichan->free_list);
-
- desc++;
- }
-
- tasklet_enable(&to_ipu(idmac)->tasklet);
-
- return 0;
-}
-
-/**
- * ipu_init_channel() - initialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- * @return 0 on success or negative error code on failure.
- */
-static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- union ipu_channel_param *params = &ichan->params;
- uint32_t ipu_conf;
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- struct ipu *ipu = to_ipu(idmac);
- int ret = 0, n_desc = 0;
-
- dev_dbg(ipu->dev, "init channel = %d\n", channel);
-
- if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
- channel != IDMAC_IC_7)
- return -EINVAL;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- switch (channel) {
- case IDMAC_IC_7:
- n_desc = 16;
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
- break;
- case IDMAC_IC_0:
- n_desc = 16;
- reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
- idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
- ret = ipu_ic_init_prpenc(ipu, params, true);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- n_desc = 4;
- break;
- default:
- break;
- }
-
- ipu->channel_init_mask |= 1L << channel;
-
- /* Enable IPU sub module */
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
- ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- if (n_desc && !ichan->desc)
- ret = idmac_desc_alloc(ichan, n_desc);
-
- dump_idmac_reg(ipu);
-
- return ret;
-}
-
-/**
- * ipu_uninit_channel() - uninitialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- */
-static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- unsigned long chan_mask = 1UL << channel;
- uint32_t ipu_conf;
- struct ipu *ipu = to_ipu(idmac);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- if (!(ipu->channel_init_mask & chan_mask)) {
- dev_err(ipu->dev, "Channel already uninitialized %d\n",
- channel);
- spin_unlock_irqrestore(&ipu->lock, flags);
- return;
- }
-
- /* Reset the double buffer */
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
- idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
-
- ichan->sec_chan_en = false;
-
- switch (channel) {
- case IDMAC_IC_7:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
- IC_CONF);
- break;
- case IDMAC_IC_0:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
- IC_CONF);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
-
- ipu->channel_init_mask &= ~(1L << channel);
-
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
- ~ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->n_tx_desc = 0;
- vfree(ichan->desc);
- ichan->desc = NULL;
-}
-
-/**
- * ipu_disable_channel() - disable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: channel object pointer.
- * @wait_for_stop: flag to set whether to wait for channel end of frame or
- * return immediately.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct ipu *ipu = to_ipu(idmac);
- uint32_t reg;
- unsigned long flags;
- unsigned long chan_mask = 1UL << channel;
- unsigned int timeout;
-
- if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
- timeout = 40;
- /* This waiting always fails. Related to spurious irq problem */
- while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
- (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
- timeout--;
- msleep(10);
-
- if (!timeout) {
- dev_dbg(ipu->dev,
- "Warning: timeout waiting for channel %u to "
- "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
- "busy = 0x%08X, tstat = 0x%08X\n", channel,
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
- break;
- }
- }
- dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
- }
- /* SDC BG and FG must be disabled before DMA is disabled */
- if (wait_for_stop && (channel == IDMAC_SDC_0 ||
- channel == IDMAC_SDC_1)) {
- for (timeout = 5;
- timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
- msleep(5);
- }
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Disable IC task */
- ipu_ic_disable_task(ipu, channel);
-
- /* Disable DMA channel(s) */
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
- idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
- struct idmac_tx_desc **desc, struct scatterlist *sg)
-{
- struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
-
- if (sgnew)
- /* next sg-element in this list */
- return sgnew;
-
- if ((*desc)->list.next == &ichan->queue)
- /* No more descriptors on the queue */
- return NULL;
-
- /* Fetch next descriptor */
- *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
- return (*desc)->sg;
-}
-
-/*
- * We have several possibilities here:
- * current BUF next BUF
- *
- * not last sg next not last sg
- * not last sg next last sg
- * last sg first sg from next descriptor
- * last sg NULL
- *
- * Besides, the descriptor queue might be empty or not. We process all these
- * cases carefully.
- */
-static irqreturn_t idmac_interrupt(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- struct device *dev = &ichan->dma_chan.dev->device;
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct scatterlist **sg, *sgnext, *sgnew = NULL;
- /* Next transfer descriptor */
- struct idmac_tx_desc *desc, *descnew;
- bool done = false;
- u32 ready0, ready1, curbuf, err;
- struct dmaengine_desc_callback cb;
-
- /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
-
- dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
-
- spin_lock(&ipu_data.lock);
-
- ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
- err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
-
- if (err & (1 << chan_id)) {
- idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
- spin_unlock(&ipu_data.lock);
- /*
- * Doing this
- * ichan->sg[0] = ichan->sg[1] = NULL;
- * you can force channel re-enable on the next tx_submit(), but
- * this is dirty - think about descriptors with multiple
- * sg elements.
- */
- dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
- chan_id, ready0, ready1, curbuf);
- return IRQ_HANDLED;
- }
- spin_unlock(&ipu_data.lock);
-
- /* Other interrupts do not interfere with this channel */
- spin_lock(&ichan->lock);
- if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
- (!ichan->active_buffer && (ready0 >> chan_id) & 1)
- )) {
- spin_unlock(&ichan->lock);
- dev_dbg(dev,
- "IRQ with active buffer still ready on channel %x, "
- "active %d, ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- if (unlikely(list_empty(&ichan->queue))) {
- ichan->sg[ichan->active_buffer] = NULL;
- spin_unlock(&ichan->lock);
- dev_err(dev,
- "IRQ without queued buffers on channel %x, active %d, "
- "ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- /*
- * active_buffer is a software flag, it shows which buffer we are
- * currently expecting back from the hardware, IDMAC should be
- * processing the other buffer already
- */
- sg = &ichan->sg[ichan->active_buffer];
- sgnext = ichan->sg[!ichan->active_buffer];
-
- if (!*sg) {
- spin_unlock(&ichan->lock);
- return IRQ_HANDLED;
- }
-
- desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
- descnew = desc;
-
- dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
- irq, (u64)sg_dma_address(*sg),
- sgnext ? (u64)sg_dma_address(sgnext) : 0,
- ichan->active_buffer, curbuf);
-
- /* Find the descriptor of sgnext */
- sgnew = idmac_sg_next(ichan, &descnew, *sg);
- if (sgnext != sgnew)
- dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
-
- /*
- * if sgnext == NULL sg must be the last element in a scatterlist and
- * queue must be empty
- */
- if (unlikely(!sgnext)) {
- if (!WARN_ON(sg_next(*sg)))
- dev_dbg(dev, "Underrun on channel %x\n", chan_id);
- ichan->sg[!ichan->active_buffer] = sgnew;
-
- if (unlikely(sgnew)) {
- ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
- } else {
- spin_lock(&ipu_data.lock);
- ipu_ic_disable_task(&ipu_data, chan_id);
- spin_unlock(&ipu_data.lock);
- ichan->status = IPU_CHANNEL_READY;
- /* Continue to check for complete descriptor */
- }
- }
-
- /* Calculate and submit the next sg element */
- sgnew = idmac_sg_next(ichan, &descnew, sgnew);
-
- if (unlikely(!sg_next(*sg)) || !sgnext) {
- /*
- * Last element in scatterlist done, remove from the queue,
- * _init for debugging
- */
- list_del_init(&desc->list);
- done = true;
- }
-
- *sg = sgnew;
-
- if (likely(sgnew) &&
- ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- dmaengine_desc_get_callback(&descnew->txd, &cb);
-
- list_del_init(&descnew->list);
- spin_unlock(&ichan->lock);
-
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock(&ichan->lock);
- }
-
- /* Flip the active buffer - even if update above failed */
- ichan->active_buffer = !ichan->active_buffer;
- if (done)
- dma_cookie_complete(&desc->txd);
-
- dmaengine_desc_get_callback(&desc->txd, &cb);
-
- spin_unlock(&ichan->lock);
-
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
- dmaengine_desc_callback_invoke(&cb, NULL);
-
- return IRQ_HANDLED;
-}
-
-static void ipu_gc_tasklet(struct tasklet_struct *t)
-{
- struct ipu *ipu = from_tasklet(ipu, t, tasklet);
- int i;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct idmac_tx_desc *desc;
- unsigned long flags;
- struct scatterlist *sg;
- int j, k;
-
- for (j = 0; j < ichan->n_tx_desc; j++) {
- desc = ichan->desc + j;
- spin_lock_irqsave(&ichan->lock, flags);
- if (async_tx_test_ack(&desc->txd)) {
- list_move(&desc->list, &ichan->free_list);
- for_each_sg(desc->sg, sg, desc->sg_len, k) {
- if (ichan->sg[0] == sg)
- ichan->sg[0] = NULL;
- else if (ichan->sg[1] == sg)
- ichan->sg[1] = NULL;
- }
- async_tx_clear_ack(&desc->txd);
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
- }
- }
-}
-
-/* Allocate and initialise a transfer descriptor. */
-static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long tx_flags,
- void *context)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac_tx_desc *desc = NULL;
- struct dma_async_tx_descriptor *txd = NULL;
- unsigned long flags;
-
- /* We only can handle these three channels so far */
- if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
- chan->chan_id != IDMAC_IC_7)
- return NULL;
-
- if (!is_slave_direction(direction)) {
- dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
- return NULL;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ichan->lock, flags);
- if (!list_empty(&ichan->free_list)) {
- desc = list_entry(ichan->free_list.next,
- struct idmac_tx_desc, list);
-
- list_del_init(&desc->list);
-
- desc->sg_len = sg_len;
- desc->sg = sgl;
- txd = &desc->txd;
- txd->flags = tx_flags;
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
-
- return txd;
-}
-
-/* Re-select the current buffer and re-activate the channel */
-static void idmac_issue_pending(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
-
- /* This is not always needed, but doesn't hurt either */
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_select_buffer(chan->chan_id, ichan->active_buffer);
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- /*
- * Might need to perform some parts of initialisation from
- * ipu_enable_channel(), but not all, we do not want to reset to buffer
- * 0, don't need to set priority again either, but re-enabling the task
- * and the channel might be a good idea.
- */
-}
-
-static int idmac_pause(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct list_head *list, *tmp;
- unsigned long flags;
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_ic_disable_task(ipu, chan->chan_id);
-
- /* Return all descriptors into "prepared" state */
- list_for_each_safe(list, tmp, &ichan->queue)
- list_del_init(list);
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- mutex_unlock(&ichan->chan_mutex);
-
- return 0;
-}
-
-static int __idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
- int i;
-
- ipu_disable_channel(idmac, ichan,
- ichan->status >= IPU_CHANNEL_ENABLED);
-
- tasklet_disable(&ipu->tasklet);
-
- /* ichan->queue is modified in ISR, have to spinlock */
- spin_lock_irqsave(&ichan->lock, flags);
- list_splice_init(&ichan->queue, &ichan->free_list);
-
- if (ichan->desc)
- for (i = 0; i < ichan->n_tx_desc; i++) {
- struct idmac_tx_desc *desc = ichan->desc + i;
- if (list_empty(&desc->list))
- /* Descriptor was prepared, but not submitted */
- list_add(&desc->list, &ichan->free_list);
-
- async_tx_clear_ack(&desc->txd);
- }
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- tasklet_enable(&ipu->tasklet);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- return 0;
-}
-
-static int idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- int ret;
-
- mutex_lock(&ichan->chan_mutex);
-
- ret = __idmac_terminate_all(chan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- return ret;
-}
-
-#ifdef DEBUG
-static irqreturn_t ic_sof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ic_eof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static int ic_sof = -EINVAL, ic_eof = -EINVAL;
-#endif
-
-static int idmac_alloc_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- int ret;
-
- /* dmaengine.c now guarantees to only offer free channels */
- BUG_ON(chan->client_count > 1);
- WARN_ON(ichan->status != IPU_CHANNEL_FREE);
-
- dma_cookie_init(chan);
-
- ret = ipu_irq_map(chan->chan_id);
- if (ret < 0)
- goto eimap;
-
- ichan->eof_irq = ret;
-
- /*
- * Important to first disable the channel, because maybe someone
- * used it before us, e.g., the bootloader
- */
- ipu_disable_channel(idmac, ichan, true);
-
- ret = ipu_init_channel(idmac, ichan);
- if (ret < 0)
- goto eichan;
-
- ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
- ichan->eof_name, ichan);
- if (ret < 0)
- goto erirq;
-
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- ic_sof = ipu_irq_map(69);
- if (ic_sof > 0) {
- ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC SOF");
- }
- ic_eof = ipu_irq_map(70);
- if (ic_eof > 0) {
- ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC EOF");
- }
- }
-#endif
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
- chan->chan_id, ichan->eof_irq);
-
- return ret;
-
-erirq:
- ipu_uninit_channel(idmac, ichan);
-eichan:
- ipu_irq_unmap(chan->chan_id);
-eimap:
- return ret;
-}
-
-static void idmac_free_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
-
- mutex_lock(&ichan->chan_mutex);
-
- __idmac_terminate_all(chan);
-
- if (ichan->status > IPU_CHANNEL_FREE) {
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- if (ic_sof > 0) {
- free_irq(ic_sof, ichan);
- ipu_irq_unmap(69);
- ic_sof = -EINVAL;
- }
- if (ic_eof > 0) {
- free_irq(ic_eof, ichan);
- ipu_irq_unmap(70);
- ic_eof = -EINVAL;
- }
- }
-#endif
- free_irq(ichan->eof_irq, ichan);
- ipu_irq_unmap(chan->chan_id);
- }
-
- ichan->status = IPU_CHANNEL_FREE;
-
- ipu_uninit_channel(idmac, ichan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(idmac)->tasklet);
-}
-
-static enum dma_status idmac_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- return dma_cookie_status(chan, cookie, txstate);
-}
-
-static int __init ipu_idmac_init(struct ipu *ipu)
-{
- struct idmac *idmac = &ipu->idmac;
- struct dma_device *dma = &idmac->dma;
- int i;
-
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- /* Compulsory common fields */
- dma->dev = ipu->dev;
- dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
- dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_tx_status = idmac_tx_status;
- dma->device_issue_pending = idmac_issue_pending;
-
- /* Compulsory for DMA_SLAVE fields */
- dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_pause = idmac_pause;
- dma->device_terminate_all = idmac_terminate_all;
-
- INIT_LIST_HEAD(&dma->channels);
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct dma_chan *dma_chan = &ichan->dma_chan;
-
- spin_lock_init(&ichan->lock);
- mutex_init(&ichan->chan_mutex);
-
- ichan->status = IPU_CHANNEL_FREE;
- ichan->sec_chan_en = false;
- snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
-
- dma_chan->device = &idmac->dma;
- dma_cookie_init(dma_chan);
- dma_chan->chan_id = i;
- list_add_tail(&dma_chan->device_node, &dma->channels);
- }
-
- idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
-
- return dma_async_device_register(&idmac->dma);
-}
-
-static void ipu_idmac_exit(struct ipu *ipu)
-{
- int i;
- struct idmac *idmac = &ipu->idmac;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
-
- idmac_terminate_all(&ichan->dma_chan);
- }
-
- dma_async_device_unregister(&idmac->dma);
-}
-
-/*****************************************************************************
- * IPU common probe / remove
- */
-
-static int __init ipu_probe(struct platform_device *pdev)
-{
- struct resource *mem_ipu, *mem_ic;
- int ret;
-
- spin_lock_init(&ipu_data.lock);
-
- mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!mem_ipu || !mem_ic)
- return -EINVAL;
-
- ipu_data.dev = &pdev->dev;
-
- platform_set_drvdata(pdev, &ipu_data);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_fn = ret;
- ret = platform_get_irq(pdev, 1);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_err = ret;
-
- dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
- ipu_data.irq_fn, ipu_data.irq_err);
-
- /* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
- if (!ipu_data.reg_ipu) {
- ret = -ENOMEM;
- goto err_ioremap_ipu;
- }
-
- /* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
- if (!ipu_data.reg_ic) {
- ret = -ENOMEM;
- goto err_ioremap_ic;
- }
-
- /* Get IPU clock */
- ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ipu_data.ipu_clk)) {
- ret = PTR_ERR(ipu_data.ipu_clk);
- goto err_clk_get;
- }
-
- /* Make sure IPU HSP clock is running */
- clk_prepare_enable(ipu_data.ipu_clk);
-
- /* Disable all interrupts */
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
-
- dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
- (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
-
- ret = ipu_irq_attach_irq(&ipu_data, pdev);
- if (ret < 0)
- goto err_attach_irq;
-
- /* Initialize DMA engine */
- ret = ipu_idmac_init(&ipu_data);
- if (ret < 0)
- goto err_idmac_init;
-
- tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
-
- ipu_data.dev = &pdev->dev;
-
- dev_dbg(ipu_data.dev, "IPU initialized\n");
-
- return 0;
-
-err_idmac_init:
-err_attach_irq:
- ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable_unprepare(ipu_data.ipu_clk);
- clk_put(ipu_data.ipu_clk);
-err_clk_get:
- iounmap(ipu_data.reg_ic);
-err_ioremap_ic:
- iounmap(ipu_data.reg_ipu);
-err_ioremap_ipu:
-err_noirq:
- dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
- return ret;
-}
-
-static int ipu_remove(struct platform_device *pdev)
-{
- struct ipu *ipu = platform_get_drvdata(pdev);
-
- ipu_idmac_exit(ipu);
- ipu_irq_detach_irq(ipu, pdev);
- clk_disable_unprepare(ipu->ipu_clk);
- clk_put(ipu->ipu_clk);
- iounmap(ipu->reg_ic);
- iounmap(ipu->reg_ipu);
- tasklet_kill(&ipu->tasklet);
-
- return 0;
-}
-
-/*
- * We need two MEM resources - with IPU-common and Image Converter registers,
- * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
- */
-static struct platform_driver ipu_platform_driver = {
- .driver = {
- .name = "ipu-core",
- },
- .remove = ipu_remove,
-};
-
-static int __init ipu_init(void)
-{
- return platform_driver_probe(&ipu_platform_driver, ipu_probe);
-}
-subsys_initcall(ipu_init);
-
-MODULE_DESCRIPTION("IPU core driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
-MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
deleted file mode 100644
index e7ec1dec3edfd9..00000000000000
--- a/drivers/dma/ipu/ipu_intern.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#ifndef _IPU_INTERN_H_
-#define _IPU_INTERN_H_
-
-#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-
-/* IPU Common registers */
-#define IPU_CONF 0x00
-#define IPU_CHA_BUF0_RDY 0x04
-#define IPU_CHA_BUF1_RDY 0x08
-#define IPU_CHA_DB_MODE_SEL 0x0C
-#define IPU_CHA_CUR_BUF 0x10
-#define IPU_FS_PROC_FLOW 0x14
-#define IPU_FS_DISP_FLOW 0x18
-#define IPU_TASKS_STAT 0x1C
-#define IPU_IMA_ADDR 0x20
-#define IPU_IMA_DATA 0x24
-#define IPU_INT_CTRL_1 0x28
-#define IPU_INT_CTRL_2 0x2C
-#define IPU_INT_CTRL_3 0x30
-#define IPU_INT_CTRL_4 0x34
-#define IPU_INT_CTRL_5 0x38
-#define IPU_INT_STAT_1 0x3C
-#define IPU_INT_STAT_2 0x40
-#define IPU_INT_STAT_3 0x44
-#define IPU_INT_STAT_4 0x48
-#define IPU_INT_STAT_5 0x4C
-#define IPU_BRK_CTRL_1 0x50
-#define IPU_BRK_CTRL_2 0x54
-#define IPU_BRK_STAT 0x58
-#define IPU_DIAGB_CTRL 0x5C
-
-/* IPU_CONF Register bits */
-#define IPU_CONF_CSI_EN 0x00000001
-#define IPU_CONF_IC_EN 0x00000002
-#define IPU_CONF_ROT_EN 0x00000004
-#define IPU_CONF_PF_EN 0x00000008
-#define IPU_CONF_SDC_EN 0x00000010
-#define IPU_CONF_ADC_EN 0x00000020
-#define IPU_CONF_DI_EN 0x00000040
-#define IPU_CONF_DU_EN 0x00000080
-#define IPU_CONF_PXL_ENDIAN 0x00000100
-
-/* Image Converter Registers */
-#define IC_CONF 0x88
-#define IC_PRP_ENC_RSC 0x8C
-#define IC_PRP_VF_RSC 0x90
-#define IC_PP_RSC 0x94
-#define IC_CMBP_1 0x98
-#define IC_CMBP_2 0x9C
-#define PF_CONF 0xA0
-#define IDMAC_CONF 0xA4
-#define IDMAC_CHA_EN 0xA8
-#define IDMAC_CHA_PRI 0xAC
-#define IDMAC_CHA_BUSY 0xB0
-
-/* Image Converter Register bits */
-#define IC_CONF_PRPENC_EN 0x00000001
-#define IC_CONF_PRPENC_CSC1 0x00000002
-#define IC_CONF_PRPENC_ROT_EN 0x00000004
-#define IC_CONF_PRPVF_EN 0x00000100
-#define IC_CONF_PRPVF_CSC1 0x00000200
-#define IC_CONF_PRPVF_CSC2 0x00000400
-#define IC_CONF_PRPVF_CMB 0x00000800
-#define IC_CONF_PRPVF_ROT_EN 0x00001000
-#define IC_CONF_PP_EN 0x00010000
-#define IC_CONF_PP_CSC1 0x00020000
-#define IC_CONF_PP_CSC2 0x00040000
-#define IC_CONF_PP_CMB 0x00080000
-#define IC_CONF_PP_ROT_EN 0x00100000
-#define IC_CONF_IC_GLB_LOC_A 0x10000000
-#define IC_CONF_KEY_COLOR_EN 0x20000000
-#define IC_CONF_RWS_EN 0x40000000
-#define IC_CONF_CSI_MEM_WR_EN 0x80000000
-
-#define IDMA_CHAN_INVALID 0x000000FF
-#define IDMA_IC_0 0x00000001
-#define IDMA_IC_1 0x00000002
-#define IDMA_IC_2 0x00000004
-#define IDMA_IC_3 0x00000008
-#define IDMA_IC_4 0x00000010
-#define IDMA_IC_5 0x00000020
-#define IDMA_IC_6 0x00000040
-#define IDMA_IC_7 0x00000080
-#define IDMA_IC_8 0x00000100
-#define IDMA_IC_9 0x00000200
-#define IDMA_IC_10 0x00000400
-#define IDMA_IC_11 0x00000800
-#define IDMA_IC_12 0x00001000
-#define IDMA_IC_13 0x00002000
-#define IDMA_SDC_BG 0x00004000
-#define IDMA_SDC_FG 0x00008000
-#define IDMA_SDC_MASK 0x00010000
-#define IDMA_SDC_PARTIAL 0x00020000
-#define IDMA_ADC_SYS1_WR 0x00040000
-#define IDMA_ADC_SYS2_WR 0x00080000
-#define IDMA_ADC_SYS1_CMD 0x00100000
-#define IDMA_ADC_SYS2_CMD 0x00200000
-#define IDMA_ADC_SYS1_RD 0x00400000
-#define IDMA_ADC_SYS2_RD 0x00800000
-#define IDMA_PF_QP 0x01000000
-#define IDMA_PF_BSP 0x02000000
-#define IDMA_PF_Y_IN 0x04000000
-#define IDMA_PF_U_IN 0x08000000
-#define IDMA_PF_V_IN 0x10000000
-#define IDMA_PF_Y_OUT 0x20000000
-#define IDMA_PF_U_OUT 0x40000000
-#define IDMA_PF_V_OUT 0x80000000
-
-#define TSTAT_PF_H264_PAUSE 0x00000001
-#define TSTAT_CSI2MEM_MASK 0x0000000C
-#define TSTAT_CSI2MEM_OFFSET 2
-#define TSTAT_VF_MASK 0x00000600
-#define TSTAT_VF_OFFSET 9
-#define TSTAT_VF_ROT_MASK 0x000C0000
-#define TSTAT_VF_ROT_OFFSET 18
-#define TSTAT_ENC_MASK 0x00000180
-#define TSTAT_ENC_OFFSET 7
-#define TSTAT_ENC_ROT_MASK 0x00030000
-#define TSTAT_ENC_ROT_OFFSET 16
-#define TSTAT_PP_MASK 0x00001800
-#define TSTAT_PP_OFFSET 11
-#define TSTAT_PP_ROT_MASK 0x00300000
-#define TSTAT_PP_ROT_OFFSET 20
-#define TSTAT_PF_MASK 0x00C00000
-#define TSTAT_PF_OFFSET 22
-#define TSTAT_ADCSYS1_MASK 0x03000000
-#define TSTAT_ADCSYS1_OFFSET 24
-#define TSTAT_ADCSYS2_MASK 0x0C000000
-#define TSTAT_ADCSYS2_OFFSET 26
-
-#define TASK_STAT_IDLE 0
-#define TASK_STAT_ACTIVE 1
-#define TASK_STAT_WAIT4READY 2
-
-struct idmac {
- struct dma_device dma;
-};
-
-struct ipu {
- void __iomem *reg_ipu;
- void __iomem *reg_ic;
- unsigned int irq_fn; /* IPU Function IRQ to the CPU */
- unsigned int irq_err; /* IPU Error IRQ to the CPU */
- unsigned int irq_base; /* Beginning of the IPU IRQ range */
- unsigned long channel_init_mask;
- spinlock_t lock;
- struct clk *ipu_clk;
- struct device *dev;
- struct idmac idmac;
- struct idmac_channel channel[IPU_CHANNELS_NUM];
- struct tasklet_struct tasklet;
-};
-
-#define to_idmac(d) container_of(d, struct idmac, dma)
-
-extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
-extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
-
-extern bool ipu_irq_status(uint32_t irq);
-extern int ipu_irq_map(unsigned int source);
-extern int ipu_irq_unmap(unsigned int source);
-
-#endif
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
deleted file mode 100644
index 97d9a6f04f2a2b..00000000000000
--- a/drivers/dma/ipu/ipu_irq.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "ipu_intern.h"
-
-/*
- * Register read / write - shall be inlined by the compiler
- */
-static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-
-/*
- * IPU IRQ chip driver
- */
-
-#define IPU_IRQ_NR_FN_BANKS 3
-#define IPU_IRQ_NR_ERR_BANKS 2
-#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
-
-struct ipu_irq_bank {
- unsigned int control;
- unsigned int status;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
- /* 3 groups of functional interrupts */
- {
- .control = IPU_INT_CTRL_1,
- .status = IPU_INT_STAT_1,
- }, {
- .control = IPU_INT_CTRL_2,
- .status = IPU_INT_STAT_2,
- }, {
- .control = IPU_INT_CTRL_3,
- .status = IPU_INT_STAT_3,
- },
- /* 2 groups of error interrupts */
- {
- .control = IPU_INT_CTRL_4,
- .status = IPU_INT_STAT_4,
- }, {
- .control = IPU_INT_CTRL_5,
- .status = IPU_INT_STAT_5,
- },
-};
-
-struct ipu_irq_map {
- unsigned int irq;
- int source;
- struct ipu_irq_bank *bank;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
-/* Protects allocations from the above array of maps */
-static DEFINE_MUTEX(map_lock);
-/* Protects register accesses and individual mappings */
-static DEFINE_RAW_SPINLOCK(bank_lock);
-
-static struct ipu_irq_map *src2map(unsigned int src)
-{
- int i;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
- if (irq_map[i].source == src)
- return irq_map + i;
-
- return NULL;
-}
-
-static void ipu_irq_unmask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg |= (1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_mask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg &= ~(1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_ack(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-/**
- * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
- * @irq: interrupt line to get status for.
- * @return: true if the interrupt is pending/asserted or false if the
- * interrupt is not pending.
- */
-bool ipu_irq_status(unsigned int irq)
-{
- struct ipu_irq_map *map = irq_get_chip_data(irq);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
- bool ret;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- bank = map->bank;
- ret = bank && ipu_read_reg(bank->ipu, bank->status) &
- (1UL << (map->source & 31));
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- return ret;
-}
-
-/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
- * @source: interrupt source bit position (see below)
- * @return: mapped IRQ number or negative error code
- *
- * The source parameter has to be explained further. On i.MX31 IPU has 137 IRQ
- * sources, they are broken down in 5 32-bit registers, like 32, 32, 24, 32, 17.
- * However, the source argument of this function is not the sequence number of
- * the possible IRQ, but rather its bit position. So, first interrupt in fourth
- * register has source number 96, and not 88. This makes calculations easier,
- * and also provides forward compatibility with any future IPU implementations
- * with any interrupt bit assignments.
- */
-int ipu_irq_map(unsigned int source)
-{
- int i, ret = -ENOMEM;
- struct ipu_irq_map *map;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- map = src2map(source);
- if (map) {
- pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
- ret = -EBUSY;
- goto out;
- }
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source < 0) {
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = source;
- irq_map[i].bank = irq_bank + source / 32;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = irq_map[i].irq;
- pr_debug("IPU: mapped source %u to IRQ %u\n",
- source, ret);
- break;
- }
- }
-out:
- mutex_unlock(&map_lock);
-
- if (ret < 0)
- pr_err("IPU: couldn't map source %u: %d\n", source, ret);
-
- return ret;
-}
-
-/**
- * ipu_irq_unmap() - unmap an IPU interrupt source
- * @source: interrupt source bit position (see ipu_irq_map())
- * @return: 0 or negative error code
- */
-int ipu_irq_unmap(unsigned int source)
-{
- int i, ret = -EINVAL;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source == source) {
- unsigned long lock_flags;
-
- pr_debug("IPU: unmapped source %u from IRQ %u\n",
- source, irq_map[i].irq);
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = -EINVAL;
- irq_map[i].bank = NULL;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = 0;
- break;
- }
- }
- mutex_unlock(&map_lock);
-
- return ret;
-}
-
-/* Chained IRQ handler for IPU function and error interrupt */
-static void ipu_irq_handler(struct irq_desc *desc)
-{
- struct ipu *ipu = irq_desc_get_handler_data(desc);
- u32 status;
- int i, line;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
-
- raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /*
- * Don't think we have to clear all interrupts here, they will
- * be acked by ->handle_irq() (handle_level_irq). However, we
- * might want to clear unhandled interrupts after the loop...
- */
- status &= ipu_read_reg(ipu, bank->control);
- raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
- unsigned int irq;
-
- line--;
- status &= ~(1UL << line);
-
- raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (!map) {
- raw_spin_unlock(&bank_lock);
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
- line, i);
- continue;
- }
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
- generic_handle_irq(irq);
- }
- }
-}
-
-static struct irq_chip ipu_irq_chip = {
- .name = "ipu_irq",
- .irq_ack = ipu_irq_ack,
- .irq_mask = ipu_irq_mask,
- .irq_unmask = ipu_irq_unmask,
-};
-
-/* Install the IRQ handler */
-int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, i;
- int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
- numa_node_id());
-
- if (irq_base < 0)
- return irq_base;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
- irq_bank[i].ipu = ipu;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- int ret;
-
- irq = irq_base + i;
- ret = irq_set_chip(irq, &ipu_irq_chip);
- if (ret < 0)
- return ret;
- ret = irq_set_chip_data(irq, irq_map + i);
- if (ret < 0)
- return ret;
- irq_map[i].ipu = ipu;
- irq_map[i].irq = irq;
- irq_map[i].source = -EINVAL;
- irq_set_handler(irq, handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
-
- irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
-
- ipu->irq_base = irq_base;
-
- return 0;
-}
-
-void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, irq_base;
-
- irq_base = ipu->irq_base;
-
- irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
-
- irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
-
- for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
- irq_set_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip(irq, NULL);
- irq_set_chip_data(irq, NULL);
- }
-}
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 1709d159af7e01..4117c7b67e9c2e 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -1732,9 +1732,4 @@ static struct platform_driver intel_ldma_driver = {
* registered DMA channels and DMA capabilities to clients before their
* initialization.
*/
-static int __init intel_ldma_init(void)
-{
- return platform_driver_register(&intel_ldma_driver);
-}
-
-device_initcall(intel_ldma_init);
+builtin_platform_driver(intel_ldma_driver);
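
The lgm-dma hunk above is pure boilerplate removal: builtin_platform_driver() expands to a device_initcall()-level registration of the named driver, so the hand-written __init wrapper adds nothing. A minimal sketch of the idiom with an illustrative driver (no module exit is generated, so it only suits always-built-in drivers):

    #include <linux/platform_device.h>

    static int example_dma_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_dma_driver = {
            .probe = example_dma_probe,
            .driver = {
                    .name = "example-dma",
            },
    };

    /* expands to an __init wrapper around platform_driver_register()
     * and registers it as a device_initcall() */
    builtin_platform_driver(example_dma_driver);
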
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
index df98cae8792bae..2b6436f4b19374 100644
--- a/drivers/dma/lpc18xx-dmamux.c
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -12,8 +12,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma-main.c
index 9413fad08a60ca..b359421ee9ea52 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *mcf_edma = dev_id;
struct edma_regs *regs = &mcf_edma->regs;
unsigned int ch;
- struct fsl_edma_chan *mcf_chan;
u64 intmap;
intmap = ioread32(regs->inth);
@@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < mcf_edma->n_chans; ch++) {
if (intmap & BIT(ch)) {
iowrite8(EDMA_MASK_CH(ch), regs->cint);
-
- mcf_chan = &mcf_edma->chans[ch];
-
- spin_lock(&mcf_chan->vchan.lock);
-
- if (!mcf_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&mcf_chan->vchan.lock);
- continue;
- }
-
- if (!mcf_chan->edesc->iscyclic) {
- list_del(&mcf_chan->edesc->vdesc.node);
- vchan_cookie_complete(&mcf_chan->edesc->vdesc);
- mcf_chan->edesc = NULL;
- mcf_chan->status = DMA_COMPLETE;
- mcf_chan->idle = true;
- } else {
- vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
- }
-
- if (!mcf_chan->edesc)
- fsl_edma_xfer_desc(mcf_chan);
-
- spin_unlock(&mcf_chan->vchan.lock);
+ fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
if (err & BIT(ch)) {
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
- mcf_edma->chans[ch].status = DMA_ERROR;
- mcf_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -172,7 +146,7 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
}
static struct fsl_edma_drvdata mcf_data = {
- .version = v2,
+ .flags = FSL_EDMA_DRV_EDMA64,
.setup_irq = mcf_edma_irq_init,
};
@@ -180,9 +154,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
struct fsl_edma_engine *mcf_edma;
- struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- int ret, i, len, chans;
+ int ret, i, chans;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
@@ -197,8 +170,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
chans = pdata->dma_channels;
}
- len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
- mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
+ GFP_KERNEL);
if (!mcf_edma)
return -ENOMEM;
@@ -227,7 +200,9 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
- iowrite32(0x0, &regs->tcd[i].csr);
+ mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ + i * sizeof(struct fsl_edma_hw_tcd);
+ iowrite32(0x0, &mcf_chan->tcd->csr);
}
iowrite32(~0, regs->inth);
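
Two things happen in the mcf-edma hunks above: per-channel completion and error handling collapses into the shared fsl_edma_tx_chan_handler()/fsl_edma_err_chan_handler() helpers, and the open-coded sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans allocation becomes struct_size(), which computes the same trailing-flexible-array size while saturating on overflow. A minimal sketch of the allocation idiom (names are illustrative):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct engine {
            int n_chans;
            struct chan { int id; } chans[];        /* flexible array member */
    };

    static struct engine *engine_alloc(int n)
    {
            /* struct_size(e, chans, n) == sizeof(*e) + n * sizeof(e->chans[0]),
             * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly */
            struct engine *e = kzalloc(struct_size(e, chans, n), GFP_KERNEL);

            if (e)
                    e->n_chans = n;
            return e;
    }
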
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 9ae92b8940efe3..324b7387b1b922 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -18,7 +18,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 69cc61c0b262b3..64120767d9838c 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index a1517ef1f4a018..c51dc017b48a84 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -551,7 +550,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
rc = dma_async_device_register(&mtkd->ddev);
if (rc)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4a51fdbf5aa9c3..1104017320b888 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -36,11 +36,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index acc4d53e4630d1..cfb9962417ef68 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/dma/mxs-dma.h>
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index e72e8c10355ea2..0b2f96fd8bf0c6 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -15,7 +15,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index b6e0ac8314e5cd..384476757c5e3a 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -20,8 +20,9 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -1116,7 +1117,7 @@ static int owl_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
nr_channels, nr_requests);
- od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+ od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
od->nr_pchans = nr_channels;
od->nr_vchans = nr_requests;
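
The owl-dma cast change avoids a pointer-to-integer truncation warning: the OF match data here is an integer smuggled through the .data pointer, so it is recovered by casting back through uintptr_t rather than straight to the narrower enum. A hedged sketch with made-up names:

    #include <linux/of_device.h>

    enum example_id { EXAMPLE_V1 = 1, EXAMPLE_V2 };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-v1", .data = (void *)EXAMPLE_V1 },
            { .compatible = "vendor,example-v2", .data = (void *)EXAMPLE_V2 },
            { /* sentinel */ }
    };

    static enum example_id example_get_id(struct device *dev)
    {
            /* the pointer carries an integer; go through uintptr_t so the
             * compiler does not complain about casting to a narrower type */
            return (enum example_id)(uintptr_t)of_device_get_match_data(dev);
    }
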
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 686c270ef7100c..f9b82dff33871c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -28,7 +28,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 932628b319c812..1c93864e0e4d41 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2160,8 +2160,7 @@ static int gpi_probe(struct platform_device *pdev)
return -ENOMEM;
gpi_dev->dev = &pdev->dev;
- gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
+ gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
if (IS_ERR(gpi_dev->regs))
return PTR_ERR(gpi_dev->regs);
gpi_dev->ee_base = gpi_dev->regs;
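
This gpi.c hunk, and the hidma, hidma_mgmt, stm32-dma and shdmac hunks further down, apply the same conversion: devm_platform_get_and_ioremap_resource() fetches the MEM resource and ioremaps it in one call, and the failure paths now propagate the real error via PTR_ERR() instead of a hard-coded -ENOMEM. A minimal probe-side sketch:

    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* replaces platform_get_resource() + devm_ioremap_resource();
             * pass NULL as the last argument if the resource itself is
             * not needed afterwards */
            base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }
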
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 344525c3a32faf..834ae519c15de0 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -45,12 +45,12 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -765,17 +765,15 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
if (IS_ERR(trca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(trca);
goto bailout;
}
- evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
if (IS_ERR(evca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(evca);
goto bailout;
}
@@ -785,7 +783,7 @@ static int hidma_probe(struct platform_device *pdev)
*/
chirq = platform_get_irq(pdev, 0);
if (chirq < 0) {
- rc = -ENODEV;
+ rc = chirq;
goto bailout;
}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 05e96b31d87169..1d675f31252b89 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -176,10 +176,9 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(virtaddr)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(virtaddr);
goto out;
}
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9479f29692d3e3..f777addda8bac0 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -9,6 +9,7 @@
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
*/
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -145,8 +146,8 @@ struct rz_dmac {
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
-#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
-#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
@@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan,
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_DDS(val);
+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_SDS(val);
+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
return 0;
}
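
The rz-dmac hunk above swaps hand-rolled shift-and-mask macros for GENMASK()/FIELD_PREP() from <linux/bitfield.h>, and clears each field before OR-ing in the new value, so a repeated dmaengine_slave_config() call can no longer accumulate stale bits in chcfg. A minimal sketch with an illustrative register field:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define CFG_DST_SIZE_MASK       GENMASK(19, 16) /* hypothetical field */

    static u32 cfg_set_dst_size(u32 cfg, u32 val)
    {
            cfg &= ~CFG_DST_SIZE_MASK;                      /* drop the old value */
            cfg |= FIELD_PREP(CFG_DST_SIZE_MASK, val);      /* shift into place */
            return cfg;
    }
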
@@ -947,7 +950,6 @@ static int rz_dmac_probe(struct platform_device *pdev)
dma_register_err:
of_dma_controller_free(pdev->dev.of_node);
err:
- reset_control_assert(dmac->rstc);
channel_num = i ? i - 1 : 0;
for (i = 0; i < channel_num; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -958,6 +960,7 @@ err:
channel->lmdesc.base_dma);
}
+ reset_control_assert(dmac->rstc);
err_pm_runtime_put:
pm_runtime_put(&pdev->dev);
err_pm_disable:
@@ -971,6 +974,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
struct rz_dmac *dmac = platform_get_drvdata(pdev);
unsigned int i;
+ dma_async_device_unregister(&dmac->engine);
+ of_dma_controller_free(pdev->dev.of_node);
for (i = 0; i < dmac->n_channels; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -979,8 +984,6 @@ static int rz_dmac_remove(struct platform_device *pdev)
channel->lmdesc.base,
channel->lmdesc.base_dma);
}
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&dmac->engine);
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
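
The rz-dmac error-path and remove() reordering above follows the usual teardown rule: stop exposing the controller (unregister from dmaengine, drop the OF translator) before releasing per-channel descriptor memory, and only assert the reset once nothing can reach the hardware any more. A hedged sketch of the remove() ordering, with the per-channel free loop elided and the structure fields illustrative:

    #include <linux/dmaengine.h>
    #include <linux/of_dma.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    #include <linux/reset.h>

    struct example_dmac {
            struct dma_device ddev;
            struct reset_control *rstc;
    };

    static int example_remove(struct platform_device *pdev)
    {
            struct example_dmac *ed = platform_get_drvdata(pdev);

            /* make the channels unreachable first ... */
            dma_async_device_unregister(&ed->ddev);
            of_dma_controller_free(pdev->dev.of_node);

            /* ... free per-channel descriptor memory here ... */

            reset_control_assert(ed->rstc);
            pm_runtime_put(&pdev->dev);
            pm_runtime_disable(&pdev->dev);

            return 0;
    }
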
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 5aafe548ca5f30..00067b29e23223 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
@@ -678,7 +677,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
- struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ struct resource *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
pdata = of_device_get_match_data(&pdev->dev);
@@ -689,7 +688,6 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!pdata || !pdata->channel_num)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* DMARS area is optional */
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
/*
@@ -709,7 +707,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
* requested with the IRQF_SHARED flag
*/
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !errirq_res)
+ if (!errirq_res)
return -ENODEV;
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
@@ -719,7 +717,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(shdev->chan_reg))
return PTR_ERR(shdev->chan_reg);
if (dmars) {
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2b639adb48ba50..168aa0bd73a0dd 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 825001bde42c4b..89e82508c13392 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3590,6 +3590,10 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
base->irq = platform_get_irq(pdev, 0);
+ if (base->irq < 0) {
+ ret = base->irq;
+ goto destroy_cache;
+ }
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
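
The ste_dma40 fix above is the standard platform_get_irq() pattern: on failure the call returns a negative errno (including -EPROBE_DEFER), not a usable vector, so the value must be checked and propagated before being handed to request_irq(). A minimal probe-side sketch:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int example_request_irq(struct platform_device *pdev,
                                   irq_handler_t handler, void *priv)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* propagates -EPROBE_DEFER as well */

            return devm_request_irq(&pdev->dev, irq, handler, 0,
                                    dev_name(&pdev->dev), priv);
    }
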
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 37674029cb427c..5c36811aa1342c 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1581,8 +1581,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd = &dmadev->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index e415bd9f4f2b6a..8d77e2a7939a0d 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -15,8 +15,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 1d0e9dd72ab392..0de234022c6d6d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index ebfd29888b2f54..2469efddf54015 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -14,8 +14,8 @@
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 8f67f453a4922c..33b10100110096 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index cc6b91f4897992..063022f9df7633 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b9700403607178..e557bada151073 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -8,9 +8,10 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index f744ddbbbad7fa..7f17ee87a6dce3 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -3,14 +3,15 @@
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 9ea91c640c324d..aa8e2e8ac26098 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -20,7 +20,6 @@
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/edma.h>
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 85e00701473cb1..05228bf0003332 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -3,6 +3,8 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/of.h>
+#include <linux/of_platform.h>
int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index eb4dc5fffe6468..30fd2f386f36a1 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -20,7 +20,6 @@
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 02e1c08c596d1d..cf96cf915c0c74 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include "../virt-dma.h"
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 3589b4ef50b83f..bb4ff8c86733bf 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -18,8 +18,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "dmaengine.h"
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index ac09f0e5f58d88..0a3b2e22f23dbb 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -41,10 +41,10 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -173,12 +173,15 @@
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_CR_DELAY_SHIFT 24
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_BD_COMP_MASK BIT(31)
#define XILINX_DMA_COALESCE_MAX 255
-#define XILINX_DMA_NUM_DESCS 255
+#define XILINX_DMA_NUM_DESCS 512
#define XILINX_DMA_NUM_APP_WORDS 5
/* AXI CDMA Specific Registers/Offsets */
@@ -410,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
* @stop_transfer: Differentiate b/w DMA IP's quiesce
* @tdest: TDEST value for mcdma
* @has_vflip: S2MM vertical flip
+ * @irq_delay: Interrupt delay timeout
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -448,6 +452,7 @@ struct xilinx_dma_chan {
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
bool has_vflip;
+ u8 irq_delay;
};
/**
@@ -493,6 +498,7 @@ struct xilinx_dma_config {
* @s2mm_chan_id: DMA s2mm channel identifier
* @mm2s_chan_id: DMA mm2s channel identifier
* @max_buffer_len: Max buffer length
+ * @has_axistream_connected: AXI DMA connected to AXI Stream IP
*/
struct xilinx_dma_device {
void __iomem *regs;
@@ -511,6 +517,7 @@ struct xilinx_dma_device {
u32 s2mm_chan_id;
u32 mm2s_chan_id;
u32 max_buffer_len;
+ bool has_axistream_connected;
};
/* Macros */
@@ -623,6 +630,29 @@ static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
}
}
+/**
+ * xilinx_dma_get_metadata_ptr- Populate metadata pointer and payload length
+ * @tx: async transaction descriptor
+ * @payload_len: metadata payload length
+ * @max_len: metadata max length
+ * Return: The app field pointer.
+ */
+static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
+ size_t *payload_len, size_t *max_len)
+{
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_axidma_tx_segment *seg;
+
+ *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
+ seg = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ return seg->hw.app;
+}
+
+static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
+ .get_ptr = xilinx_dma_get_metadata_ptr,
+};
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
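
The block added above exposes the AXI DMA "app" words through the dmaengine metadata interface: when the DT marks the DMA as connected to an AXI4-Stream IP, descriptors advertise DESC_METADATA_ENGINE and xilinx_dma_get_metadata_ptr() hands clients a pointer straight into the first segment's hardware descriptor. A hedged client-side sketch (assumes the ERR_PTR convention of the core metadata helpers; error handling trimmed):

    #include <linux/dmaengine.h>

    static int fill_stream_control_words(struct dma_async_tx_descriptor *txd)
    {
            size_t payload_len, max_len;
            u32 *app;

            app = dmaengine_desc_get_metadata_ptr(txd, &payload_len, &max_len);
            if (IS_ERR(app))
                    return PTR_ERR(app);

            /* MEM_TO_DEV: write the control words before submitting;
             * DEV_TO_MEM: read them back once the transfer completes */
            app[0] = 0x0;   /* illustrative AXI4-Stream control word */

            return dmaengine_desc_set_metadata_len(txd, sizeof(u32));
    }
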
@@ -1535,6 +1565,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
+ reg &= ~XILINX_DMA_CR_DELAY_MAX;
+ reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
xilinx_dma_start(chan);
@@ -1683,6 +1716,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ struct xilinx_axidma_tx_segment *seg;
+
+ seg = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+ break;
+ }
if (chan->has_sg && chan->xdev->dma_config->dmatype !=
XDMA_TYPE_VDMA)
desc->residue = xilinx_dma_get_residue(chan, desc);
@@ -1816,7 +1857,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
spin_unlock(&chan->lock);
}
- tasklet_schedule(&chan->tasklet);
+ tasklet_hi_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
@@ -1864,15 +1905,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
}
}
- if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
- /*
- * Device takes too long to do the transfer when user requires
- * responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
-
- if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+ if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
+ XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
chan->idle = true;
@@ -2221,6 +2255,9 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment->hw.control |= XILINX_DMA_BD_EOP;
}
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
return &desc->async_tx;
error:
@@ -2796,6 +2833,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
+ of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
+
chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
err = of_property_read_u32(node, "xlnx,datawidth", &value);
@@ -3067,6 +3106,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xdev->has_axistream_connected =
+ of_property_read_bool(node, "xlnx,axistream-connected");
+ }
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
&num_frames);
@@ -3092,6 +3136,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
else
xdev->ext_addr = false;
+ /* Set metadata mode */
+ if (xdev->has_axistream_connected)
+ xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
+
/* Set the dma mask bits */
err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
if (err < 0) {
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9360f43b8e0f3c..bd8c3cc2eaab6f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -11,8 +11,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>