diff --git a/README.md b/README.md
index 03950da..ec88d9c 100644
--- a/README.md
+++ b/README.md
@@ -2,13 +2,13 @@
 This is the MathWorks Yocto layer to make it easier to interface MATLAB(R)/Simulink(TM) SoC Blockset software feature set with AMD-Xilinx FPGA SoC hardware platforms (like AMD-Xilinx Zynq-7000, Xilinx ZynqMPSoC, Xilinx ZynqRFSoC, Xilinx Versal).
 
 ### Dependencies
-This layer is added on top of the [PetaLinux BSPs provided by AMD-Xilinx](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/2347204609/2022.1+Release).
+This layer is added on top of the [PetaLinux BSPs provided by AMD-Xilinx](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/2654765057/2023.1+Release).
 
 ### Using the meta-mathworks Layer
-PetaLinux ecosystem of Linux build system can be used to generate a new Embedded Linux image for use with the AMD-Xilinx FPGA SoC platforms. PetaLinux is a wrapper around the Yocto tools to make the configuration of the Embedded Linux image easier. For more information about the PetaLinux SDK refer to [PetaLinux User Guide](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2022_1/ug1144-petalinux-tools-reference-guide.pdf) or [PetaLinux Wiki] (https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842250/PetaLinux).
+The PetaLinux ecosystem of the Linux build system can be used to generate a new Embedded Linux image for use with the AMD-Xilinx FPGA SoC platforms. PetaLinux is a wrapper around the Yocto tools that makes configuration of the Embedded Linux image easier. For more information about the PetaLinux SDK, refer to the [PetaLinux User Guide](https://docs.amd.com/r/2023.1-English/ug1144-petalinux-tools-reference-guide/Overview) or the [PetaLinux Wiki](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842250/PetaLinux).
 
-This layer supports PetaLinux 2022.1 version of PetaLinux tools.
+This layer supports the 2023.1 version of the PetaLinux tools.
 
 To build a PetaLinux project using the MathWorks yocto layer, you need a Linux computer or a Virtual Machine running Ubuntu where you have downloaded and already installed [PetaLinux software](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842250/PetaLinux).
@@ -16,7 +16,7 @@ Open a Linux terminal and run following commands:
 ```bash
 source <petalinux-install-dir>/settings.sh
-git clone https://github.com/mathworks/meta-mathworks -b petalinux-2022.1
+git clone https://github.com/mathworks/meta-mathworks -b petalinux-2023.1
 petalinux-create -t project --template <TEMPLATE> --name <PROJECT NAME>
 
 Here <TEMPLATE> can be any one of:
 * zynqMP (for Xilinx Zynq UltraScale+ MPSoC platform)
@@ -26,7 +26,7 @@ petalinux-create -t project --template <TEMPLATE> --name <PROJECT NAME>
 petalinux-config --get-hw-description=<path to XSA file>
 Here XSA, Xilinx Software Architecture, file is obtained by exporting the Vivado project for embedded software development.
 ```
-Alternatively, you can use a [pre-built PetaLinux BSP file](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/2347204609/2022.1+Release) for your hardware board to create the project.
+Alternatively, you can use a [pre-built PetaLinux BSP file](https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/2654765057/2023.1+Release) for your hardware board to create the project.
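Read end to end, the README steps above amount to a flow like the sketch below. The install path, template choice, project name, and XSA path are illustrative placeholders rather than values from the layer itself, and registering meta-mathworks with the project is assumed to happen through the petalinux-config menu (Yocto Settings -> User Layers), since the layer does not ship a dedicated command for that step:

```bash
# Sketch of the template-based flow described above (all paths and names are placeholders)
source /opt/petalinux/2023.1/settings.sh                      # assumed PetaLinux install location
git clone https://github.com/mathworks/meta-mathworks -b petalinux-2023.1
petalinux-create -t project --template zynqMP --name mw_project
cd mw_project
# Import the hardware description exported from Vivado (XSA path is a placeholder)
petalinux-config --get-hw-description=/path/to/design_1_wrapper.xsa
# In the menu that opens, add the cloned meta-mathworks directory under
# "Yocto Settings -> User Layers", then save and exit.
petalinux-build
```

After the build completes, the generated boot artifacts and root filesystem end up under images/linux/ inside the project directory.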
```bash petalinux-create -t project -s diff --git a/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/init.d/usb_network b/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/init.d/usb_network index 70db7a0..08e0e25 100644 --- a/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/init.d/usb_network +++ b/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/init.d/usb_network @@ -19,7 +19,10 @@ create_system_files () { then UDHCPD_CONF=/etc/udhcpd.conf IFAC=/etc/network/interfaces - NETMASK=`fw_printenv -n netmask 2> /dev/null || echo 255.255.255.0` + NETMASK=`echo $NET_MASK` + if [ -z "$NETMASK" ]; then + NETMASK="255.255.255.0" + fi ### Replace /etc/udhcpd.conf ### echo "start $USB_IPADDR_HOST" > $UDHCPD_CONF echo "end $USB_IPADDR_HOST" >> $UDHCPD_CONF diff --git a/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/udev/rules.d/100-mw-drivers.rules b/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/udev/rules.d/100-mw-drivers.rules index d6f9a69..6cc0e59 100644 --- a/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/udev/rules.d/100-mw-drivers.rules +++ b/recipes-apps/mw-fs-overlay/files/common/fs-overlay/etc/udev/rules.d/100-mw-drivers.rules @@ -1,2 +1,8 @@ # map mwipcore0 to mwipcore for legacy models - KERNEL=="mwipcore0", SYMLINK+="mwipcore" +KERNEL=="mwipcore0", SYMLINK+="mwipcore" +KERNEL=="mwipcore_frame_buffer0", SYMLINK+="mwipcore_frame_buffer" +KERNEL=="mwipcore_framecapture0", SYMLINK+="mwipcore_framecapture" +KERNEL=="mwipcore_hdmi_out0", SYMLINK+="mwipcore_hdmi_out" +KERNEL=="mwipcore_dutbypass0", SYMLINK+="mwipcore_dutbypass" +KERNEL=="mwipcore_hdmi_in0", SYMLINK+="mwipcore_hdmi_in" +KERNEL=="mwipcore_tpg0", SYMLINK+="mwipcore_tpg" diff --git a/recipes-apps/mw-fs-overlay/mw-fs-overlay.bb b/recipes-apps/mw-fs-overlay/mw-fs-overlay.bb index 34248ef..e8f99a0 100644 --- a/recipes-apps/mw-fs-overlay/mw-fs-overlay.bb +++ b/recipes-apps/mw-fs-overlay/mw-fs-overlay.bb @@ -39,7 +39,7 @@ SERVICEUNITS = "sdcard_mount.service usb_network.service \ inetd.service user_app.service nfs-common.service hostname.service \ backupSSHKeys.service restoreSSHKeys.service udc.service " -SYSTEMD_SERVICE:${PN} = "${@bb.utils.contains('INIT_MANAGER','systemd','${SERVICEUNITS}','" "',d)}" +SYSTEMD_SERVICE:${PN} = "${@bb.utils.contains('INIT_MANAGER','systemd','${SERVICEUNITS}','',d)}" do_install() { chmod -R 0755 ${WORKDIR}/common/fs-overlay/usr/sbin/ diff --git a/recipes-apps/mw-refdesign-dtb/files/CMakeLists.txt b/recipes-apps/mw-refdesign-dtb/files/CMakeLists.txt index b9b32cd..021da52 100644 --- a/recipes-apps/mw-refdesign-dtb/files/CMakeLists.txt +++ b/recipes-apps/mw-refdesign-dtb/files/CMakeLists.txt @@ -46,6 +46,7 @@ list(APPEND SUPPORTED_BOARDS zc706 zc702 zed + zcu106_imx ) foreach (b IN LISTS SUPPORTED_BOARDS) diff --git a/recipes-apps/mw-refdesign-dtb/files/common/dts/mw-dlhdl-iio-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/common/dts/mw-dlhdl-iio-common.dtsi index 164800e..873fdf5 100644 --- a/recipes-apps/mw-refdesign-dtb/files/common/dts/mw-dlhdl-iio-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/common/dts/mw-dlhdl-iio-common.dtsi @@ -50,8 +50,6 @@ compatible = "mathworks,mwipcore-v3.00"; #address-cells = <0x1>; #size-cells = <0x0>; - linux,phandle = <0x41>; - phandle = <0x41>; mmrd-channel@0 { reg = <0x0>; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/base.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/base.dtsi index 277e193..5790602 100644 --- 
a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/base.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/base.dtsi @@ -1,5 +1,6 @@ /dts-v1/; +#include "zynq.dtsi" #include "zynq-picozed.dtsi" #include "zynq-mw-common.dtsi" diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq.dts b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq.dts new file mode 100644 index 0000000..af39202 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq.dts @@ -0,0 +1,2 @@ +#include "base.dtsi" +#include "zynq-mw-visionzynq-common.dtsi" diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq_axis.dts b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq_axis.dts new file mode 100644 index 0000000..ee69bd0 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/visionzynq_axis.dts @@ -0,0 +1,3 @@ +#include "base.dtsi" +#include "zynq-mw-visionzynq-common.dtsi" +#include "zynq-mw-visionzynq-axis.dtsi" diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq-picozed.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq-picozed.dtsi index 04129c5..45c6dde 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq-picozed.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq-picozed.dtsi @@ -1,5 +1,3 @@ -/include/ "zynq.dtsi" - / { model = "Xilinx Zynq PicoZed"; memory { @@ -13,19 +11,28 @@ }; }; -ð { +&aliases { + ethernet1 = &gem1; +}; + +&gem1 { status = "okay"; - phy-handle = <&phy0>; - phy-mode = "rgmii-id"; + phy-handle = <&gmiitorgmii>; + phy-mode = "gmii"; + + gmiitorgmii: gmiitorgmii@8 { + compatible = "xlnx,gmii-to-rgmii-1.0"; + reg = <0x8>; + phy-handle = <&phy1>; + }; - phy0: phy@0 { - compatible = "marvell,88e1510"; + phy1: phy@1 { device_type = "ethernet-phy"; - reg = <0x0>; - marvell,reg-init=<3 16 0xff00 0x1e 3 17 0xfff0 0x0a>; + reg = <0x1>; + marvell,reg-init = <3 16 0xff00 0x1e 3 17 0xfff0 0x00>; }; }; - + &sdhci1 { status = "okay"; /* SD1 is onnected to a non-removable eMMC flash device */ @@ -75,4 +82,4 @@ reg = <0xC00000 0x400000>; }; }; -}; \ No newline at end of file +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq.dtsi new file mode 100644 index 0000000..465f00e --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/picozed/dts/zynq.dtsi @@ -0,0 +1,32 @@ + +#include "zynq-7000.dtsi" + +/ { + interrupt-parent = <&intc>; + + aliases: aliases { + ethernet0 = &gem0; + serial0 = &uart1; + }; +}; + +&gem0 { + status = "okay"; +}; + +&clkc { + ps-clk-frequency = <33333333>; +}; + +&usb0 { + status = "okay"; + dr_mode = "host"; /* This breaks OTG mode */ +}; + +&uart1 { + status = "okay"; +}; + +&sdhci0 { + status = "okay"; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/zed/dts/base.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/zed/dts/base.dtsi index bc32662..4955aae 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/boards/zed/dts/base.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/boards/zed/dts/base.dtsi @@ -1,6 +1,13 @@ #include "zynq-zed.dts" #include "zynq-mw-common.dtsi" #include "zynq-mw-axilite-common.dtsi" + +/ { + chosen { + bootargs = "earlycon root=/dev/ram rw cma=128M"; + }; +}; + &mwipcore0 { reg = <0x400D0000 0xffff>; }; diff --git 
a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-adi-mw-axistream-iio-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-adi-mw-axistream-iio-common.dtsi index 652c17e..f31ebb2 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-adi-mw-axistream-iio-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-adi-mw-axistream-iio-common.dtsi @@ -3,10 +3,10 @@ &axi4stream_mm2s { reg = <0x40020000 0x10000>; - interrupts = <0x0 0x1d 0x0>; + interrupts = <0x0 0x1d 0x04>; }; &axi4stream_s2mm { reg = <0x40030000 0x10000>; - interrupts = <0x0 0x1e 0x0>; + interrupts = <0x0 0x1e 0x04>; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-ad9361.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-ad9361.dtsi index 84b1b91..2300c99 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-ad9361.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-ad9361.dtsi @@ -9,9 +9,12 @@ &fpga_axi { /delete-node/ mwipcore@43c00000; + /delete-node/ dma@43000000; cf_ad9361_dac_core_0: cf-ad9361-dds-core-lpc@79024000 { /* Mute DDS by default */ adi,axi-dds-default-scale = <0>; + adi,axi-dds-rate = <0x01>; + adi,axi-dds-1-rf-channel; }; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-dlhdl-iio-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-dlhdl-iio-common.dtsi index 5152acb..28d0d0a 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-dlhdl-iio-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-dlhdl-iio-common.dtsi @@ -1,14 +1,14 @@ &fpga_axi { - mwipcore_ddr0: mwipcore_ddr@0 { - compatible = "mathworks,mwipcore-v2.00"; - reg = <0x40010000 0x10000>; - }; + mwipcore_dl0: mwipcore_dl@0 { + compatible = "mathworks,mwipcore-v2.00"; + reg = <0x44A00000 0x10000>; + }; }; &fpga_axi { - mwipcore_dl0: mwipcore_dl@0 { + mwipcore_ddr0: mwipcore_ddr@0 { compatible = "mathworks,mwipcore-v2.00"; - reg = <0x44A00000 0x10000>; + reg = <0x40010000 0x10000>; }; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-fmcomms5.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-fmcomms5.dtsi index 88f5749..a464626 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-fmcomms5.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-fmcomms5.dtsi @@ -9,9 +9,12 @@ &fpga_axi { /delete-node/ mwipcore@43c00000; + /delete-node/ dma@43000000; cf_ad9361_dac_core_0: cf-ad9361-dds-core-lpc@79024000 { /* Mute DDS by default */ adi,axi-dds-default-scale = <0>; + adi,axi-dds-rate = <0x01>; + adi,axi-dds-1-rf-channel; }; cf_ad9361_dac_core_1: cf-ad9361-dds-core-B@79044000 { diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-hdmicam-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-hdmicam-common.dtsi index 1355d89..a0cab2c 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-hdmicam-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-hdmicam-common.dtsi @@ -13,7 +13,7 @@ reg = <1>; adv7511@39 { - compatible = "adv7511-media"; + compatible = "adv7511-v4l2"; reg = <0x39>; powerdown-gpio = <&pca9534 4 GPIO_ACTIVE_HIGH>; edid-addr = <0x3F>; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-imageon-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-imageon-common.dtsi index 12bdab5..0a9ecd2 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-imageon-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynq/dts/zynq-mw-imageon-common.dtsi @@ -40,7 +40,7 @@ 
reg = <1>; adv7511@39 { - compatible = "adv7511-media"; + compatible = "adv7511-v4l2"; reg = <0x39>; powerdown-gpio = <&pca9534 4 GPIO_ACTIVE_HIGH>; edid-addr = <0x3F>; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu102/dts/fmcomms2_axis.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu102/dts/fmcomms2_axis.dts index 8b52674..5206bce 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu102/dts/fmcomms2_axis.dts +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu102/dts/fmcomms2_axis.dts @@ -1,2 +1,8 @@ #include "fmcomms2.dts" #include "zynqmp-mw-ad9361-axis.dtsi" +&axi4stream_mm2s{ + interrupts = <0x00 0x59 0x04>; +}; +&axi4stream_s2mm{ + interrupts = <0x00 0x5a 0x04>; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/base.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/base.dtsi new file mode 100644 index 0000000..05e4c83 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/base.dtsi @@ -0,0 +1,6 @@ +#include "zynqmp-zcu106-revA.dts" +/ { + chosen { + bootargs = "earlycon root=/dev/ram rw"; + }; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/li-imx274mipi-fmc.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/li-imx274mipi-fmc.dtsi new file mode 100644 index 0000000..e16e86e --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/li-imx274mipi-fmc.dtsi @@ -0,0 +1,29 @@ +&i2c1 { + i2cswitch@75 { + i2c@0 { + eeprom@57 { /* HPC0_IIC */ + compatible = "at,24c64"; + reg = <0x57>; + }; + }; + }; +}; + +&sensor_iic_0 { + clocks = <&vid_s_axi_clk>; + imx274: sensor@1a{ + compatible = "sony,imx274"; + reg = <0x1a>; + #address-cells = <1>; + #size-cells = <0>; + reset-gpios = <&gpio 90 0>; + + port@0 { + reg = <0>; + + sensor_out: endpoint { + remote-endpoint = <&mipi_csi_inmipi_csi2_rx_mipi_csi2_rx_subsystem_0>; + }; + }; + }; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/pcw.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/pcw.dtsi new file mode 100644 index 0000000..92300c4 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/pcw.dtsi @@ -0,0 +1,181 @@ +/* + * CAUTION: This file is automatically generated by Xilinx. 
+ * Version: XSCT 2020.2 + * Today is: Wed Feb 9 18:55:34 2022 + */ + + +&fclk2 { + status = "okay"; +}; +&gic { + num_cpus = <2>; + num_interrupts = <96>; +}; +&lpd_dma_chan1 { + status = "okay"; +}; +&lpd_dma_chan2 { + status = "okay"; +}; +&lpd_dma_chan3 { + status = "okay"; +}; +&lpd_dma_chan4 { + status = "okay"; +}; +&lpd_dma_chan5 { + status = "okay"; +}; +&lpd_dma_chan6 { + status = "okay"; +}; +&lpd_dma_chan7 { + status = "okay"; +}; +&lpd_dma_chan8 { + status = "okay"; +}; +&xilinx_ams { + status = "okay"; +}; +&can1 { + status = "okay"; +}; +&cci { + status = "okay"; +}; +&zynqmp_dpsub { + phy-names = "dp-phy0","dp-phy1"; + phys = <&psgtr 1 6 0 3 27000000>, <&psgtr 0 6 1 3 27000000>; + status = "okay"; + xlnx,max-lanes = <2>; +}; +&zynqmp_dpdma { + status = "okay"; +}; +&gem3 { + phy-mode = "rgmii-id"; + status = "okay"; + xlnx,ptp-enet-clock = <0x0>; +}; +&fpd_dma_chan1 { + status = "okay"; +}; +&fpd_dma_chan2 { + status = "okay"; +}; +&fpd_dma_chan3 { + status = "okay"; +}; +&fpd_dma_chan4 { + status = "okay"; +}; +&fpd_dma_chan5 { + status = "okay"; +}; +&fpd_dma_chan6 { + status = "okay"; +}; +&fpd_dma_chan7 { + status = "okay"; +}; +&fpd_dma_chan8 { + status = "okay"; +}; +&gpio { + emio-gpio-width = <32>; + gpio-mask-high = <0x0>; + gpio-mask-low = <0x5600>; + status = "okay"; +}; +&gpu { + status = "okay"; + xlnx,tz-nonsecure = <0x1>; +}; +&i2c0 { + clock-frequency = <400000>; + status = "okay"; +}; +&i2c1 { + clock-frequency = <400000>; + status = "okay"; +}; +&qspi { + is-dual = <1>; + num-cs = <1>; + spi-rx-bus-width = <4>; + spi-tx-bus-width = <4>; + status = "okay"; +}; +&rtc { + status = "okay"; +}; +&sata { + ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>; + ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>; + ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>; + ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>; + ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>; + ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>; + ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>; + ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>; + status = "okay"; + xlnx,tz-nonsecure-sata0 = <0x0>; + xlnx,tz-nonsecure-sata1 = <0x0>; +}; +&sdhci1 { + clock-frequency = <187481262>; + status = "okay"; + xlnx,mio-bank = <0x1>; +}; +&uart0 { + cts-override ; + device_type = "serial"; + port-number = <0>; + status = "okay"; + u-boot,dm-pre-reloc ; +}; +&uart1 { + cts-override ; + device_type = "serial"; + port-number = <1>; + status = "okay"; + u-boot,dm-pre-reloc ; +}; +&usb0 { + status = "okay"; + xlnx,tz-nonsecure = <0x1>; + xlnx,usb-polarity = <0x0>; + xlnx,usb-reset-mode = <0x0>; +}; +&dwc3_0 { + status = "okay"; +}; +&lpd_watchdog { + status = "okay"; +}; +&watchdog0 { + status = "okay"; +}; +&pss_ref_clk { + clock-frequency = <33330000>; +}; +&ams_ps { + status = "okay"; +}; +&ams_pl { + status = "okay"; +}; +&zynqmp_dp_snd_pcm0 { + status = "okay"; +}; +&zynqmp_dp_snd_pcm1 { + status = "okay"; +}; +&zynqmp_dp_snd_card0 { + status = "okay"; +}; +&zynqmp_dp_snd_codec0 { + status = "okay"; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/stream_clk_300mhz.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/stream_clk_300mhz.dtsi new file mode 100644 index 0000000..00758e6 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/common/stream_clk_300mhz.dtsi @@ -0,0 +1,6 @@ +&vid_stream_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + 
clock-frequency = <300000000>; +}; + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/pl.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/pl.dtsi new file mode 100644 index 0000000..63463af --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/pl.dtsi @@ -0,0 +1,297 @@ +/* + * CAUTION: This file is automatically generated by Xilinx. + * Version: XSCT 2020.2 + * Today is: Wed Feb 9 18:55:34 2022 + */ + + +/ { + amba_pl: amba_pl@0 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "simple-bus"; + ranges ; + axi_intc_0: interrupt-controller@a0055000 { + #interrupt-cells = <2>; + clock-names = "s_axi_aclk"; + clocks = <&zynqmp_clk 71>; + compatible = "xlnx,axi-intc-4.1", "xlnx,xps-intc-1.00.a"; + interrupt-controller ; + interrupt-names = "irq"; + interrupt-parent = <&gic>; + interrupts = <0 104 4>; + reg = <0x0 0xa0055000 0x0 0x1000>; + xlnx,kind-of-intr = <0x0>; + xlnx,num-intr-inputs = <0x3>; + }; + mipi_csi2_rx_mipi_csi2_rx_subsystem_0: mipi_csi2_rx_subsystem@a00f0000 { + clock-names = "lite_aclk", "dphy_clk_200M", "video_aclk"; + clocks = <&zynqmp_clk 71>, <&misc_clk_0>, <&zynqmp_clk 72>; + compatible = "xlnx,mipi-csi2-rx-subsystem-5.1"; + interrupt-names = "csirxss_csi_irq"; + interrupt-parent = <&axi_intc_0>; + interrupts = <0 2>; + reg = <0x0 0xa00f0000 0x0 0x10000>; + xlnx,axis-tdata-width = <32>; + xlnx,cal-mode = "NONE"; + xlnx,clk-io-swap = "false"; + xlnx,clk-lane-io-position = <0x1a>; + xlnx,clk-lp-io-swap = "false"; + xlnx,csi-en-activelanes = "true"; + xlnx,csi-en-crc = "true"; + xlnx,csi-filter-userdatatype = "true"; + xlnx,csi-opt1-regs = "false"; + xlnx,csi-pxl-format = "RAW10"; + xlnx,csi2rx-dbg = <0x0>; + xlnx,data-lane0-io-position = <0x2d>; + xlnx,data-lane1-io-position = <0x20>; + xlnx,data-lane2-io-position = <0x11>; + xlnx,data-lane3-io-position = <0x29>; + xlnx,dl0-io-swap = "false"; + xlnx,dl0-lp-io-swap = "false"; + xlnx,dl1-io-swap = "false"; + xlnx,dl1-lp-io-swap = "false"; + xlnx,dl2-io-swap = "false"; + xlnx,dl2-lp-io-swap = "false"; + xlnx,dl3-io-swap = "false"; + xlnx,dl3-lp-io-swap = "false"; + xlnx,dphy-lanes = <0x4>; + xlnx,dphy-mode = "SLAVE"; + xlnx,en-7s-linerate-check = "false"; + xlnx,en-active-lanes ; + xlnx,en-bg0-pin0 = "false"; + xlnx,en-bg0-pin6 = "false"; + xlnx,en-bg1-pin0 = "true"; + xlnx,en-bg1-pin6 = "false"; + xlnx,en-bg2-pin0 = "false"; + xlnx,en-bg2-pin6 = "false"; + xlnx,en-bg3-pin0 = "true"; + xlnx,en-bg3-pin6 = "false"; + xlnx,en-clk300m = "false"; + xlnx,en-cnts-byte-clk = "false"; + xlnx,en-csi-v2-0 = "false"; + xlnx,en-exdesigns = "false"; + xlnx,en-ext-tap = "false"; + xlnx,en-timeout-regs = "false"; + xlnx,en-vcx = "false"; + xlnx,esc-timeout = <0x6400>; + xlnx,exdes-board = "ZCU102"; + xlnx,exdes-config = "MIPI_Video_Pipe_Camera_to_Display"; + xlnx,exdes-fmc = "LI-IMX274MIPI-FMC V1.0 Single Sensor"; + xlnx,exdesboard-version = "xilinx.com:vck190:part0:2.0"; + xlnx,fifo-rd-en-control = "true"; + xlnx,hs-line-rate = <0x5a0>; + xlnx,hs-settle-ns = <0x8d>; + xlnx,hs-timeout = <0x10005>; + xlnx,idly-group-name = "mipi_csi2rx_idly_group"; + xlnx,idly-tap = <0x1>; + xlnx,init = <0x186a0>; + xlnx,int-qor-check = <0x0>; + xlnx,is-7series = "false"; + xlnx,is-versal = "false"; + xlnx,lprx-disable-extport = <0x0>; + xlnx,max-lanes = <4>; + xlnx,mipi-slv-int = <0x0>; + xlnx,ooc-vid-clk = "6.666"; + #ifdef CONFIG_ONE_PPC + xlnx,ppc = <1>; + #endif + #ifdef CONFIG_TWO_PPC + xlnx,ppc = <2>; + #endif + xlnx,rcve-alt-deskew-seq = "false"; + 
xlnx,rcve-deskew-seq = "false"; + xlnx,share-idlyctrl = "false"; + xlnx,stretch-line-rate = <0xdac>; + xlnx,vc = <4>; + xlnx,vfb ; + mipi_csi_portsmipi_csi2_rx_mipi_csi2_rx_subsystem_0: ports { + #address-cells = <1>; + #size-cells = <0>; + mipi_csi_port0mipi_csi2_rx_mipi_csi2_rx_subsystem_0: port@0 { + /* Fill cfa-pattern=rggb for raw data types, other fields video-format and video-width user needs to fill */ + reg = <0>; + xlnx,cfa-pattern = "rggb"; + xlnx,video-format = <12>; + xlnx,video-width = <8>; + mipi_csirx_outmipi_csi2_rx_mipi_csi2_rx_subsystem_0: endpoint { + remote-endpoint = <&mipi_csi2_rx_v_demosaic_0mipi_csi2_rx_mipi_csi2_rx_subsystem_0>; + }; + }; + mipi_csi_port1mipi_csi2_rx_mipi_csi2_rx_subsystem_0: port@1 { + /* Fill cfa-pattern=rggb for raw data types, other fields video-format,video-width user needs to fill */ + /* User need to add something like remote-endpoint=<&out> under the node csiss_in:endpoint */ + reg = <1>; + xlnx,cfa-pattern = "rggb"; + xlnx,video-format = <12>; + xlnx,video-width = <8>; + mipi_csi_inmipi_csi2_rx_mipi_csi2_rx_subsystem_0: endpoint { + }; + }; + }; + }; + misc_clk_0: misc_clk_0 { + #clock-cells = <0>; + clock-frequency = <199980000>; + compatible = "fixed-clock"; + }; + mipi_csi2_rx_v_demosaic_0: v_demosaic@a0250000 { + clock-names = "ap_clk"; + clocks = <&zynqmp_clk 72>; + compatible = "xlnx,v-demosaic-1.1", "xlnx,v-demosaic"; + reg = <0x0 0xa0250000 0x0 0x10000>; + reset-gpios = <&gpio 85 1>; + xlnx,max-height = <2160>; + xlnx,max-width = <3840>; + xlnx,s-axi-ctrl-addr-width = <0x6>; + xlnx,s-axi-ctrl-data-width = <0x20>; + demosaic_portsmipi_csi2_rx_v_demosaic_0: ports { + #address-cells = <1>; + #size-cells = <0>; + demosaic_port1mipi_csi2_rx_v_demosaic_0: port@1 { + /* For cfa-pattern=rggb user needs to fill as per BAYER format */ + reg = <1>; + xlnx,cfa-pattern = "rggb"; + xlnx,video-width = <8>; + demo_outmipi_csi2_rx_v_demosaic_0: endpoint { + remote-endpoint = <&mipi_csi2_rx_v_gamma_lut_0mipi_csi2_rx_v_demosaic_0>; + }; + }; + demosaic_port0mipi_csi2_rx_v_demosaic_0: port@0 { + /* For cfa-pattern=rggb user needs to fill as per BAYER format */ + reg = <0>; + xlnx,cfa-pattern = "rggb"; + xlnx,video-width = <8>; + mipi_csi2_rx_v_demosaic_0mipi_csi2_rx_mipi_csi2_rx_subsystem_0: endpoint { + remote-endpoint = <&mipi_csirx_outmipi_csi2_rx_mipi_csi2_rx_subsystem_0>; + }; + }; + }; + }; + mipi_csi2_rx_v_frmbuf_wr_0: v_frmbuf_wr@a0260000 { + #dma-cells = <1>; + clock-names = "ap_clk"; + clocks = <&zynqmp_clk 72>; + compatible = "xlnx,v-frmbuf-wr-2.2", "xlnx,axi-frmbuf-wr-v2.1"; + interrupt-names = "interrupt"; + interrupt-parent = <&axi_intc_0>; + interrupts = <1 2>; + reg = <0x0 0xa0260000 0x0 0x10000>; + reset-gpios = <&gpio 80 1>; + xlnx,dma-addr-width = <32>; + #ifdef CONFIG_ONE_PPC + xlnx,dma-align = <8>; + xlnx,pixels-per-clock = <1>; + #endif + #ifdef CONFIG_TWO_PPC + xlnx,dma-align = <16>; + xlnx,pixels-per-clock = <2>; + #endif + xlnx,max-height = <2160>; + xlnx,max-width = <3840>; + xlnx,s-axi-ctrl-addr-width = <0x7>; + xlnx,s-axi-ctrl-data-width = <0x20>; + xlnx,vid-formats = "bgr888", "xbgr8888", "xrgb8888", "uyvy", "y8", "vuy888", "xvuy8888", "yuyv", "nv12", "nv16"; + xlnx,video-width = <8>; + }; + mipi_csi2_rx_v_gamma_lut_0: v_gamma_lut@a0270000 { + clock-names = "ap_clk"; + clocks = <&zynqmp_clk 72>; + compatible = "xlnx,v-gamma-lut-1.1", "xlnx,v-gamma-lut"; + reg = <0x0 0xa0270000 0x0 0x10000>; + reset-gpios = <&gpio 86 1>; + xlnx,max-height = <2160>; + xlnx,max-width = <3840>; + xlnx,s-axi-ctrl-addr-width = <13>; + 
xlnx,s-axi-ctrl-data-width = <32>; + gamma_portsmipi_csi2_rx_v_gamma_lut_0: ports { + #address-cells = <1>; + #size-cells = <0>; + gamma_port1mipi_csi2_rx_v_gamma_lut_0: port@1 { + reg = <1>; + xlnx,video-width = <8>; + gamma_outmipi_csi2_rx_v_gamma_lut_0: endpoint { + remote-endpoint = <&mipi_csi2_rx_v_proc_ss_cscmipi_csi2_rx_v_gamma_lut_0>; + }; + }; + gamma_port0mipi_csi2_rx_v_gamma_lut_0: port@0 { + reg = <0>; + xlnx,video-width = <8>; + mipi_csi2_rx_v_gamma_lut_0mipi_csi2_rx_v_demosaic_0: endpoint { + remote-endpoint = <&demo_outmipi_csi2_rx_v_demosaic_0>; + }; + }; + }; + }; + mipi_csi2_rx_v_proc_ss_csc: v_proc_ss@a0240000 { + clock-names = "aclk"; + clocks = <&zynqmp_clk 72>; + compatible = "xlnx,v-proc-ss-2.3", "xlnx,vpss-csc", "xlnx,v-vpss-csc"; + reg = <0x0 0xa0240000 0x0 0x10000>; + reset-gpios = <&gpio 84 1>; + xlnx,colorspace-support = <0>; + xlnx,csc-enable-window = "false"; + xlnx,max-height = <2160>; + xlnx,max-width = <3840>; + xlnx,num-video-components = <3>; + #ifdef CONFIG_ONE_PPC + xlnx,samples-per-clk = <1>; + #endif + #ifdef CONFIG_TWO_PPC + xlnx,samples-per-clk = <2>; + #endif + xlnx,topology = <3>; + xlnx,use-uram = <0>; + xlnx,video-width = <8>; + csc_portsmipi_csi2_rx_v_proc_ss_csc: ports { + #address-cells = <1>; + #size-cells = <0>; + csc_port1mipi_csi2_rx_v_proc_ss_csc: port@1 { + /* For xlnx,video-format user needs to fill as per their requirement */ + reg = <1>; + xlnx,video-format = <3>; + xlnx,video-width = <8>; + csc_outmipi_csi2_rx_v_proc_ss_csc: endpoint { + remote-endpoint = <&mipi_csi2_rx_v_frmbuf_wr_0mipi_csi2_rx_v_proc_ss_scaler>; + }; + }; + csc_port0mipi_csi2_rx_v_proc_ss_csc: port@0 { + /* For xlnx,video-format user needs to fill as per their requirement */ + reg = <0>; + xlnx,video-format = <3>; + xlnx,video-width = <8>; + mipi_csi2_rx_v_proc_ss_cscmipi_csi2_rx_v_gamma_lut_0: endpoint { + remote-endpoint = <&gamma_outmipi_csi2_rx_v_gamma_lut_0>; + }; + }; + }; + }; + sensor_iic_0: i2c@a0054000 { + #address-cells = <1>; + #size-cells = <0>; + clock-names = "s_axi_aclk"; + clocks = <&zynqmp_clk 71>; + compatible = "xlnx,axi-iic-2.0", "xlnx,xps-iic-2.00.a"; + interrupt-names = "iic2intc_irpt"; + interrupt-parent = <&axi_intc_0>; + interrupts = <2 2>; + reg = <0x0 0xa0054000 0x0 0x1000>; + }; + vcap_mipi_csi2_rx_v_proc_ss_scaler { + compatible = "xlnx,video"; + dma-names = "port0"; + dmas = <&mipi_csi2_rx_v_frmbuf_wr_0 0>; + vcap_portsmipi_csi2_rx_v_proc_ss_scaler: ports { + #address-cells = <1>; + #size-cells = <0>; + vcap_portmipi_csi2_rx_v_proc_ss_scaler: port@0 { + direction = "input"; + reg = <0>; + mipi_csi2_rx_v_frmbuf_wr_0mipi_csi2_rx_v_proc_ss_scaler: endpoint { + remote-endpoint = <&csc_outmipi_csi2_rx_v_proc_ss_csc>; + }; + }; + }; + }; + }; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/system-top.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/system-top.dtsi new file mode 100644 index 0000000..f776b9d --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/system-top.dtsi @@ -0,0 +1,30 @@ +/* + * CAUTION: This file is automatically generated by Xilinx. 
+ * Version: XSCT 2020.2 + * Today is: Wed Feb 9 18:55:34 2022 + */ + +#include "pl.dtsi" +#include "common/pcw.dtsi" +#include "common/li-imx274mipi-fmc.dtsi" +/ { + chosen { + bootargs = "earlycon clk_ignore_unused"; + stdout-path = "serial0:115200n8"; + }; + aliases { + ethernet0 = &gem3; + i2c0 = &i2c0; + i2c1 = &i2c1; + i2c2 = &sensor_iic_0; + serial0 = &uart0; + serial1 = &uart1; + spi0 = &qspi; + }; + memory { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x7ff00000>, <0x00000008 0x00000000 0x0 0x80000000>; + }; +}; + +#include "top_wrapper.dtsi" diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/top_wrapper.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/top_wrapper.dtsi new file mode 100644 index 0000000..238f54c --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274/top_wrapper.dtsi @@ -0,0 +1,49 @@ +&amba_pl { + vid_stream_clk: vid_stream_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <150000000>; + }; + vid_s_axi_clk: vid_s_axi_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <99990000>; + }; + + mwipcore0: mwipcore@A0040000 { + compatible = "mathworks,mwipcore-v3.00"; + reg = <0x0 0xA0040000 0x0 0x10000>; + }; + +}; + +&i2c1 { + i2c-mux@74 { + /delete-node/ i2c@3; + }; +}; + + + +&sensor_iic_0 { + clocks = <&vid_s_axi_clk>; +}; + +&mipi_csi2_rx_mipi_csi2_rx_subsystem_0 { + compatible = "xlnx,mipi-csi2-rx-subsystem-2.0"; +}; + +&mipi_csi_inmipi_csi2_rx_mipi_csi2_rx_subsystem_0 { + data-lanes = <1 2 3 4>; + remote-endpoint = <&sensor_out>; +}; + +&mipi_csi2_rx_v_proc_ss_csc { + compatible = "xlnx,v-vpss-csc"; +}; + +&mipi_csi2_rx_v_frmbuf_wr_0 { + xlnx,dma-align = <32>; +}; + + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_150mhz.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_150mhz.dts new file mode 100644 index 0000000..c855cb8 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_150mhz.dts @@ -0,0 +1,8 @@ +#define CONFIG_ONE_PPC + +#include "base.dtsi" +#include "imx274/system-top.dtsi" +/{ + model = "IMX274MIPI-FMC ZynqMP ZCU106 RevA"; +}; + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_300mhz.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_300mhz.dts new file mode 100644 index 0000000..88d6f60 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_1ppc_300mhz.dts @@ -0,0 +1,10 @@ +#define CONFIG_ONE_PPC + +#include "base.dtsi" +#include "imx274/system-top.dtsi" +/{ + model = "IMX274MIPI-FMC ZynqMP ZCU106 RevA"; +}; + +#include "imx274/common/stream_clk_300mhz.dtsi" + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_150mhz.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_150mhz.dts new file mode 100644 index 0000000..2a50d75 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_150mhz.dts @@ -0,0 +1,8 @@ +#define CONFIG_TWO_PPC + +#include "base.dtsi" +#include "imx274/system-top.dtsi" +/{ + model = "IMX274MIPI-FMC ZynqMP ZCU106 RevA"; +}; + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_300mhz.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_300mhz.dts new file mode 100644 index 0000000..083dba3 --- /dev/null +++ 
b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu106_imx/imx274_2ppc_300mhz.dts @@ -0,0 +1,10 @@ +#define CONFIG_TWO_PPC + +#include "base.dtsi" +#include "imx274/system-top.dtsi" +/{ + model = "IMX274MIPI-FMC ZynqMP ZCU106 RevA"; +}; + +#include "imx274/common/stream_clk_300mhz.dtsi" + diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu111/dts/hdlv.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu111/dts/hdlv.dts index 4053dd7..d50bb5a 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu111/dts/hdlv.dts +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu111/dts/hdlv.dts @@ -9,4 +9,4 @@ dr_mode = "peripheral"; snps,usb3_lpm_capable; maximum-speed ="super-speed"; -}; \ No newline at end of file +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu208/dts/hdlv.dts b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu208/dts/hdlv.dts new file mode 100644 index 0000000..a6cb323 --- /dev/null +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/boards/zcu208/dts/hdlv.dts @@ -0,0 +1,14 @@ +#include "base.dtsi" + +&usb0 { + status = "okay"; +}; + +&dwc3_0 { + status = "okay"; + dr_mode = "peripheral"; + snps,usb3_lpm_capable; + phy-names = "usb3-phy"; + phys=<&psgtr PHY_TYPE_USB3 0 2 26000000>; + maximum-speed ="super-speed"; +}; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynq-mw-ad9361-bypass-user-logic.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynq-mw-ad9361-bypass-user-logic.dtsi index 11f6ee7..410527b 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynq-mw-ad9361-bypass-user-logic.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynq-mw-ad9361-bypass-user-logic.dtsi @@ -2,7 +2,7 @@ &fpga_axi { mwbypassrx0: mwbypassrx@0 { compatible = "mathworks,mwipcore-v3.00"; - reg = <0x0 0x80000000 0x10000>; + reg = <0x80000000 0x10000>; #address-cells = <1>; #size-cells = <0>; mmrd-channel@0 { @@ -18,7 +18,7 @@ }; mwbypasstx0: mwbypasstx@0 { compatible = "mathworks,mwipcore-v3.00"; - reg = <0x0 0x80000000 0x10000>; + reg = <0x80000000 0x10000>; #address-cells = <1>; #size-cells = <0>; mmrd-channel@0 { diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis-xil.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis-xil.dtsi index 5db2b56..28af91c 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis-xil.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis-xil.dtsi @@ -1,12 +1,12 @@ #include "zynqmp-mw-axistream-iio-common.dtsi" &axi4stream_mm2s { - reg = <0x0 0x80020000 0x10000>; + reg = <0x80020000 0x10000>; clocks = <&adc0_ad9361 0 &adc0_ad9361 0 &adc0_ad9361 0>; }; &axi4stream_s2mm { - reg = <0x0 0x80030000 0x10000>; + reg = <0x80030000 0x10000>; clocks = <&adc0_ad9361 0 &adc0_ad9361 0 &adc0_ad9361 0>; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis.dtsi index 5938541..8b68b05 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361-axis.dtsi @@ -1,10 +1,10 @@ #include "zynqmp-adi-mw-axistream-iio-common.dtsi" &axi4stream_mm2s { - reg = <0x0 0x80020000 0x10000>; + reg = <0x80020000 0x10000>; }; &axi4stream_s2mm { - reg = <0x0 0x80030000 0x10000>; + reg = <0x80030000 0x10000>; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361.dtsi 
b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361.dtsi index 4acf723..36b02df 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-ad9361.dtsi @@ -12,9 +12,9 @@ }; &fpga_axi { - mwipcore0: mwipcore@0 { + mwipcore0: mwipcore@80000000 { compatible = "mathworks,mwipcore-v3.00"; - reg = <0x0 0x80000000 0x10000>; + reg = <0x80000000 0x10000>; }; }; @@ -23,8 +23,8 @@ #include "zynq-mw-ad9361-bypass-user-logic.dtsi" &mwbypassrx0 { - reg = <0x0 0x80060000 0x10000>; + reg = <0x80060000 0x10000>; }; &mwbypasstx0 { - reg = <0x0 0x80040000 0x10000>; + reg = <0x80040000 0x10000>; }; diff --git a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-hdmicam-common.dtsi b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-hdmicam-common.dtsi index 506c10c..13782b7 100644 --- a/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-hdmicam-common.dtsi +++ b/recipes-apps/mw-refdesign-dtb/files/zynqmp/dts/zynqmp-mw-hdmicam-common.dtsi @@ -13,7 +13,7 @@ reg = <1>; adv7511@39 { - compatible = "adv7511-media"; + compatible = "adv7511-v4l2"; reg = <0x39>; powerdown-gpio = <&pca9534 4 GPIO_ACTIVE_HIGH>; edid-addr = <0x3F>; diff --git a/recipes-apps/mw-refdesign-dtb/mw-refdesign-dtb.bb b/recipes-apps/mw-refdesign-dtb/mw-refdesign-dtb.bb index 8ce1cda..4bd5bb7 100644 --- a/recipes-apps/mw-refdesign-dtb/mw-refdesign-dtb.bb +++ b/recipes-apps/mw-refdesign-dtb/mw-refdesign-dtb.bb @@ -3,7 +3,6 @@ DESCRIPTION = "Generate device-tree blobs for use with MathWorks example referen LICENSE = "MIT" LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302" FILESEXTRAPATHS:prepend := "${THISDIR}/files:" - PACKAGE_ARCH = "${MACHINE_ARCH}" # Depend on the virtual/kernel for kernel source and cmake-native for CMake DEPENDS = "virtual/kernel cmake-native dtc-native" @@ -16,24 +15,29 @@ SRC_URI = " file://CMakeLists.txt \ S = "${WORKDIR}" -inherit deploy -# Inherit cmake to use the cmake class functions inherit cmake +inherit deploy # Pass the architecture and cross-compile prefix to CMake EXTRA_OECMAKE = " -DKERNEL_DIR=${STAGING_KERNEL_DIR} -DMACHINE_OVERRIDES=${MACHINEOVERRIDES} -DCROSS_COMPILE=${TARGET_PREFIX} " -do_deploy() { - install -d ${DEPLOYDIR} - install -d ${DEPLOYDIR}/boot/mwdtbs - install -Dm 0644 ${B}/*.dtb ${DEPLOYDIR}/boot/mwdtbs/ +# Install DTBs into the target rootfs so the runtime package exists +do_install() { + install -d ${D}/boot/mwdtbs + # Adjust source path if your build outputs DTBs elsewhere + install -m 0644 ${B}/*.dtb ${D}/boot/mwdtbs/ || true } -do_install() { - : +# Also deploy DTBs as build artifacts +do_deploy() { + install -d ${DEPLOYDIR}/boot/mwdtbs + install -m 0644 ${B}/*.dtb ${DEPLOYDIR}/boot/mwdtbs/ || true } + addtask deploy after do_compile before do_build +# Package the installed DTBs into the main runtime package FILES:${PN} += "/boot/mwdtbs/*.dtb" -INSANE_SKIP:${PN} += "installed-vs-skipped" +# Skip QA warning if some machines don’t produce files +INSANE_SKIP:${PN} += "installed-vs-skipped" diff --git a/recipes-core/images/petalinux-image-minimal.bbappend b/recipes-core/images/petalinux-image-minimal.bbappend index 4d350ed..566050f 100644 --- a/recipes-core/images/petalinux-image-minimal.bbappend +++ b/recipes-core/images/petalinux-image-minimal.bbappend @@ -11,8 +11,7 @@ IMAGE_INSTALL:append = " procps" IMAGE_INSTALL:append = " i2c-tools" IMAGE_INSTALL:append = " os-release" IMAGE_INSTALL:append = " gdbserver" - - +IMAGE_INSTALL:append = 
" libad9361-iio" inherit extrausers # Set root password as "root" (argument -p for usermod is obtained using 'mkpasswd -m sha-512 root') diff --git a/recipes-kernel/linux/linux-xlnx/0003-drivers-dma-xilinx-Add-chan-directions.patch b/recipes-kernel/linux/linux-xlnx/0003-drivers-dma-xilinx-Add-chan-directions.patch index 5293e50..3662771 100644 --- a/recipes-kernel/linux/linux-xlnx/0003-drivers-dma-xilinx-Add-chan-directions.patch +++ b/recipes-kernel/linux/linux-xlnx/0003-drivers-dma-xilinx-Add-chan-directions.patch @@ -1,8 +1,17 @@ +From b56e6003355023d7dbd34643045c4ef5d5fb940e Mon Sep 17 00:00:00 2001 +From: pusaxena +Date: Thu, 27 Feb 2025 00:58:46 +0530 +Subject: [PATCH] drivers/dma/xilinx: Add chan directions + +--- + drivers/dma/xilinx/xilinx_dma.c | 2 ++ + 1 file changed, 2 insertions(+) + diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c -index 97055271d8fa..6b139bdae02b 100644 +index 0a3b2e22f23d..5caae73744b9 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c -@@ -2814,6 +2814,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, +@@ -2855,6 +2855,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; @@ -10,7 +19,7 @@ index 97055271d8fa..6b139bdae02b 100644 chan->id = xdev->mm2s_chan_id++; chan->tdest = chan->id; -@@ -2831,6 +2832,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, +@@ -2872,6 +2873,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, of_device_is_compatible(node, "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; @@ -18,3 +27,6 @@ index 97055271d8fa..6b139bdae02b 100644 chan->id = xdev->s2mm_chan_id++; chan->tdest = chan->id - xdev->dma_config->max_channels / 2; chan->has_vflip = of_property_read_bool(node, +-- +2.25.1 + diff --git a/recipes-kernel/linux/linux-xlnx/0004-industrialio-buffer-dma-improvements.patch b/recipes-kernel/linux/linux-xlnx/0004-industrialio-buffer-dma-improvements.patch new file mode 100644 index 0000000..ef2e68e --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/0004-industrialio-buffer-dma-improvements.patch @@ -0,0 +1,26 @@ +From 20ea8542251deaadf66a88e9c45a0a255e7f8f64 Mon Sep 17 00:00:00 2001 +From: pusaxena +Date: Thu, 27 Feb 2025 02:21:16 +0530 +Subject: [PATCH] Manually applied patch for industrialio buffer dma + improvements + +--- + drivers/iio/buffer/industrialio-buffer-dma.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c +index c1c4ca5798b2..30702c360144 100644 +--- a/drivers/iio/buffer/industrialio-buffer-dma.c ++++ b/drivers/iio/buffer/industrialio-buffer-dma.c +@@ -91,7 +91,7 @@ + * callback is called from within the custom callback. 
+ */ + +-static unsigned int iio_dma_buffer_max_block_size = SZ_16M; ++static unsigned int iio_dma_buffer_max_block_size = SZ_128M; + module_param_named(max_block_size, iio_dma_buffer_max_block_size, uint, 0644); + + static void iio_buffer_block_release(struct kref *kref) +-- +2.25.1 + diff --git a/recipes-kernel/linux/linux-xlnx/0005-Apply-patch-to-update-adv7604.c-for-720p30-support.patch b/recipes-kernel/linux/linux-xlnx/0005-Apply-patch-to-update-adv7604.c-for-720p30-support.patch new file mode 100644 index 0000000..6eccefd --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/0005-Apply-patch-to-update-adv7604.c-for-720p30-support.patch @@ -0,0 +1,32 @@ +From 000da3f797e898bed979398a228be757363e25d7 Mon Sep 17 00:00:00 2001 +From: pusaxena +Date: Wed, 30 Apr 2025 00:40:54 +0530 +Subject: [PATCH] Apply patch to update adv7604.c for 720p30 support + +--- + drivers/media/i2c/adv7604.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c +index 58d22ac94893..b9730a2e5a81 100644 +--- a/drivers/media/i2c/adv7604.c ++++ b/drivers/media/i2c/adv7604.c +@@ -221,7 +221,6 @@ static bool adv76xx_has_afe(struct adv76xx_state *state) + + /* Unsupported timings. This device cannot support 720p30. */ + static const struct v4l2_dv_timings adv76xx_timings_exceptions[] = { +- V4L2_DV_BT_CEA_1280X720P30, + { } + }; + +@@ -290,6 +289,7 @@ static const struct adv76xx_video_standards adv76xx_prim_mode_hdmi_comp[] = { + { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 }, + { V4L2_DV_BT_CEA_1280X720P24, 0x13, 0x04 }, + { V4L2_DV_BT_CEA_1280X720P25, 0x13, 0x03 }, ++ { V4L2_DV_BT_CEA_1280X720P30, 0x13, 0x02 }, + { V4L2_DV_BT_CEA_1280X720P50, 0x13, 0x01 }, + { V4L2_DV_BT_CEA_1280X720P60, 0x13, 0x00 }, + { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 }, +-- +2.25.1 + diff --git a/recipes-kernel/linux/linux-xlnx/0006-patch-for-IPCore-Interrupt-driver.patch b/recipes-kernel/linux/linux-xlnx/0006-patch-for-IPCore-Interrupt-driver.patch new file mode 100644 index 0000000..63edc8c --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/0006-patch-for-IPCore-Interrupt-driver.patch @@ -0,0 +1,28 @@ +From 6c48481533f1a6fd21269289c49b65895039322a Mon Sep 17 00:00:00 2001 +From: pusaxena +Date: Wed, 30 Apr 2025 10:40:36 +0530 +Subject: [PATCH] patch for IPCore Interrupt driver + +--- + drivers/misc/mathworks/mathworks_ip_common.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/misc/mathworks/mathworks_ip_common.c b/drivers/misc/mathworks/mathworks_ip_common.c +index cf3ae148e36e..abcb745515d5 100644 +--- a/drivers/misc/mathworks/mathworks_ip_common.c ++++ b/drivers/misc/mathworks/mathworks_ip_common.c +@@ -52,7 +52,10 @@ static irqreturn_t mathworks_ip_intr_handler(int irq, void * theIpcore) + /* thisIpcore->irq is starting Linux INT number for that DUT */ + /* Difference irq - thisIpcore->irq is relattive INT number as described above */ + /*relativeIntIndex = irq - thisIpcore->irq; currently supporting one Interrupt per DUT */ +- sysfs_notify_dirent(thisIpcore->irq_kn[0]); ++ if (thisIpcore->irq_kn[0]) ++ { ++ sysfs_notify_dirent(thisIpcore->irq_kn[0]); ++ } + return IRQ_HANDLED; + } + +-- +2.25.1 + diff --git a/recipes-kernel/linux/linux-xlnx/0007-patch-xilinx-dma.patch b/recipes-kernel/linux/linux-xlnx/0007-patch-xilinx-dma.patch new file mode 100644 index 0000000..dd5d7e5 --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/0007-patch-xilinx-dma.patch @@ -0,0 +1,2525 @@ +From f97bf3416a85386a7ffd7d9c1ffd4bb6c201fd0c Mon Sep 17 
00:00:00 2001 +From: psharma +Date: Wed, 29 Oct 2025 13:55:27 +0530 +Subject: [PATCH] patch-xilinx-dma + +--- + drivers/dma/xilinx/xilinx_dma.c | 1486 ++++++++++++------------------- + include/linux/dma-mapping.h | 7 + + 2 files changed, 576 insertions(+), 917 deletions(-) + +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c +index 6b139bdae02b..429a49eebe90 100644 +--- a/drivers/dma/xilinx/xilinx_dma.c ++++ b/drivers/dma/xilinx/xilinx_dma.c +@@ -1,4 +1,3 @@ +-// SPDX-License-Identifier: GPL-2.0-or-later + /* + * DMA driver for Xilinx Video DMA Engine + * +@@ -26,11 +25,10 @@ + * Access (DMA) between a memory-mapped source address and a memory-mapped + * destination address. + * +- * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft +- * Xilinx IP that provides high-bandwidth direct memory access between +- * memory and AXI4-Stream target peripherals. It provides scatter gather +- * (SG) interface with multiple channels independent configuration support. +- * ++ * This program is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 2 of the License, or ++ * (at your option) any later version. + */ + + #include +@@ -74,9 +72,6 @@ + #define XILINX_DMA_DMACR_CIRC_EN BIT(1) + #define XILINX_DMA_DMACR_RUNSTOP BIT(0) + #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) +-#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24) +-#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16) +-#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8) + + #define XILINX_DMA_REG_DMASR 0x0004 + #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) +@@ -91,7 +86,6 @@ + #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) + #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) + #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) +-#define XILINX_DMA_DMASR_SG_MASK BIT(3) + #define XILINX_DMA_DMASR_IDLE BIT(1) + #define XILINX_DMA_DMASR_HALTED BIT(0) + #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) +@@ -121,13 +115,8 @@ + #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) + #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) + +-#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec +-#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) +- + /* HW specific definitions */ +-#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20 +-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +-#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1 ++#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 + + #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ + (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ +@@ -169,18 +158,31 @@ + #define XILINX_DMA_REG_BTT 0x28 + + /* AXI DMA Specific Masks/Bit fields */ +-#define XILINX_DMA_MAX_TRANS_LEN_MIN 8 +-#define XILINX_DMA_MAX_TRANS_LEN_MAX 23 +-#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 ++#define XILINX_DMA_MAX_LEN_REG_WIDTH 23 ++#define XILINX_DMA_MIN_LEN_REG_WIDTH 8 ++#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) + #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) + #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) + #define XILINX_DMA_CR_COALESCE_SHIFT 16 + #define XILINX_DMA_BD_SOP BIT(27) + #define XILINX_DMA_BD_EOP BIT(26) ++#define XILINX_DMA_BD_CMPL BIT(31) + #define XILINX_DMA_COALESCE_MAX 255 + #define XILINX_DMA_NUM_DESCS 255 + #define XILINX_DMA_NUM_APP_WORDS 5 + ++/* Multi-Channel DMA Descriptor offsets*/ ++#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) ++#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) ++ ++/* Multi-Channel DMA Masks/Shifts */ ++#define 
XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) ++#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) ++#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) ++#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) ++#define XILINX_DMA_BD_STRIDE_SHIFT 0 ++#define XILINX_DMA_BD_VSIZE_SHIFT 19 ++ + /* AXI CDMA Specific Registers/Offsets */ + #define XILINX_CDMA_REG_SRCADDR 0x18 + #define XILINX_CDMA_REG_DSTADDR 0x20 +@@ -188,32 +190,8 @@ + /* AXI CDMA Specific Masks */ + #define XILINX_CDMA_CR_SGMODE BIT(3) + +-#define xilinx_prep_dma_addr_t(addr) \ +- ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) +- +-/* AXI MCDMA Specific Registers/Offsets */ +-#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 +-#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 +-#define XILINX_MCDMA_CHEN_OFFSET 0x0008 +-#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 +-#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 +-#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 +-#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) +-#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) +-#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) +-#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) +- +-/* AXI MCDMA Specific Masks/Shifts */ +-#define XILINX_MCDMA_COALESCE_SHIFT 16 +-#define XILINX_MCDMA_COALESCE_MAX 24 +-#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) +-#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) +-#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) +-#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) +-#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) +-#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) +-#define XILINX_MCDMA_BD_EOP BIT(30) +-#define XILINX_MCDMA_BD_SOP BIT(31) ++/* AXI DMA Descriptor Status Register */ ++ + + /** + * struct xilinx_vdma_desc_hw - Hardware Descriptor +@@ -242,8 +220,8 @@ struct xilinx_vdma_desc_hw { + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C +- * @reserved1: Reserved @0x10 +- * @reserved2: Reserved @0x14 ++ * @mcdma_control: Control field for mcdma @0x10 ++ * @vsize_stride: Vsize and Stride field for mcdma @0x14 + * @control: Control field @0x18 + * @status: Status field @0x1C + * @app: APP Fields @0x20 - 0x30 +@@ -253,34 +231,10 @@ struct xilinx_axidma_desc_hw { + u32 next_desc_msb; + u32 buf_addr; + u32 buf_addr_msb; +- u32 reserved1; +- u32 reserved2; +- u32 control; +- u32 status; +- u32 app[XILINX_DMA_NUM_APP_WORDS]; +-} __aligned(64); +- +-/** +- * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA +- * @next_desc: Next Descriptor Pointer @0x00 +- * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 +- * @buf_addr: Buffer address @0x08 +- * @buf_addr_msb: MSB of Buffer address @0x0C +- * @rsvd: Reserved field @0x10 +- * @control: Control Information field @0x14 +- * @status: Status field @0x18 +- * @sideband_status: Status of sideband signals @0x1C +- * @app: APP Fields @0x20 - 0x30 +- */ +-struct xilinx_aximcdma_desc_hw { +- u32 next_desc; +- u32 next_desc_msb; +- u32 buf_addr; +- u32 buf_addr_msb; +- u32 rsvd; ++ u32 mcdma_control; ++ u32 vsize_stride; + u32 control; + u32 status; +- u32 sideband_status; + u32 app[XILINX_DMA_NUM_APP_WORDS]; + } __aligned(64); + +@@ -330,18 +284,6 @@ struct xilinx_axidma_tx_segment { + dma_addr_t phys; + } __aligned(64); + +-/** +- * struct xilinx_aximcdma_tx_segment - Descriptor segment +- * @hw: Hardware descriptor +- * @node: Node in the descriptor segments list +- * @phys: Physical address of segment +- */ +-struct xilinx_aximcdma_tx_segment { +- struct 
xilinx_aximcdma_desc_hw hw; +- struct list_head node; +- dma_addr_t phys; +-} __aligned(64); +- + /** + * struct xilinx_cdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor +@@ -360,16 +302,24 @@ struct xilinx_cdma_tx_segment { + * @segments: TX segments list + * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. +- * @err: Whether the descriptor has an error. +- * @residue: Residue of the completed descriptor + */ + struct xilinx_dma_tx_descriptor { + struct dma_async_tx_descriptor async_tx; + struct list_head segments; + struct list_head node; + bool cyclic; +- bool err; +- u32 residue; ++}; ++ ++/** ++ * enum xilinx_dma_halt_mode - Halt modes for DMA engines ++ * @XILINX_DMA_HALT_MODE_NORMAL: Perform halt via CR access ++ * @XILINX_DMA_HALT_MODE_RESET_RETRY: Reset the DMA engine if the halt fails ++ * @XILINX_DMA_HALT_MODE_RESET_ALWAYS: Always reset the DMA engine to halt ++ */ ++enum xilinx_dma_halt_mode { ++ XILINX_DMA_HALT_MODE_NORMAL = 0x0, ++ XILINX_DMA_HALT_MODE_RESET_RETRY, ++ XILINX_DMA_HALT_MODE_RESET_ALWAYS, + }; + + /** +@@ -392,24 +342,22 @@ struct xilinx_dma_tx_descriptor { + * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers. + * @genlock: Support genlock mode ++ * @no_coalesce: Do not coalesce interrupts + * @err: Channel has errors + * @idle: Check for channel idle +- * @terminating: Check for channel being synchronized by user + * @tasklet: Cleanup work after irq + * @config: Device configuration info + * @flush_on_fsync: Flush on Frame sync + * @desc_pendingcount: Descriptor pending count + * @ext_addr: Indicates 64 bit addressing is supported by dma channel + * @desc_submitcount: Descriptor h/w submitted count ++ * @residue: Residue for AXI DMA + * @seg_v: Statically allocated segments base +- * @seg_mv: Statically allocated segments base for MCDMA + * @seg_p: Physical allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers + * @cyclic_seg_p: Physical allocated segments base for cyclic dma + * @start_transfer: Differentiate b/w DMA IP's transfer +- * @stop_transfer: Differentiate b/w DMA IP's quiesce + * @tdest: TDEST value for mcdma +- * @has_vflip: S2MM vertical flip + */ + struct xilinx_dma_chan { + struct xilinx_dma_device *xdev; +@@ -430,40 +378,36 @@ struct xilinx_dma_chan { + bool has_sg; + bool cyclic; + bool genlock; ++ bool no_coalesce; + bool err; + bool idle; +- bool terminating; + struct tasklet_struct tasklet; + struct xilinx_vdma_config config; + bool flush_on_fsync; + u32 desc_pendingcount; + bool ext_addr; + u32 desc_submitcount; ++ u32 residue; + struct xilinx_axidma_tx_segment *seg_v; +- struct xilinx_aximcdma_tx_segment *seg_mv; + dma_addr_t seg_p; + struct xilinx_axidma_tx_segment *cyclic_seg_v; + dma_addr_t cyclic_seg_p; + void (*start_transfer)(struct xilinx_dma_chan *chan); +- int (*stop_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; +- bool has_vflip; + }; + + /** +- * enum xdma_ip_type - DMA IP type. ++ * enum xdma_ip_type: DMA IP type. + * +- * @XDMA_TYPE_AXIDMA: Axi dma ip. +- * @XDMA_TYPE_CDMA: Axi cdma ip. +- * @XDMA_TYPE_VDMA: Axi vdma ip. +- * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip. ++ * XDMA_TYPE_AXIDMA: Axi dma ip. ++ * XDMA_TYPE_CDMA: Axi cdma ip. ++ * XDMA_TYPE_VDMA: Axi vdma ip. 
+ * + */ + enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +- XDMA_TYPE_AXIMCDMA + }; + + struct xilinx_dma_config { +@@ -471,8 +415,6 @@ struct xilinx_dma_config { + int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, + struct clk **tx_clk, struct clk **txs_clk, + struct clk **rx_clk, struct clk **rxs_clk); +- irqreturn_t (*irq_handler)(int irq, void *data); +- const int max_channels; + }; + + /** +@@ -480,8 +422,13 @@ struct xilinx_dma_config { + * @regs: I/O mapped base address + * @dev: Device Structure + * @common: DMA device structure ++ * @dma_parms: DMA Parameters structure + * @chan: Driver specific DMA channel ++ * @has_sg: Specifies whether Scatter-Gather is present or not ++ * @mcdma: Specifies whether Multi-Channel is present or not + * @flush_on_fsync: Flush on frame sync ++ * @max_length: Maximum length of a DMA transfer ++ * @halt_mode: How to halt the DMA engine + * @ext_addr: Indicates 64 bit addressing is supported by dma device + * @pdev: Platform device structure pointer + * @dma_config: DMA config structure +@@ -490,16 +437,20 @@ struct xilinx_dma_config { + * @txs_clk: DMA mm2s stream clock + * @rx_clk: DMA s2mm clock + * @rxs_clk: DMA s2mm stream clock +- * @s2mm_chan_id: DMA s2mm channel identifier +- * @mm2s_chan_id: DMA mm2s channel identifier +- * @max_buffer_len: Max buffer length ++ * @nr_channels: Number of channels DMA device supports ++ * @chan_id: DMA channel identifier + */ + struct xilinx_dma_device { + void __iomem *regs; + struct device *dev; + struct dma_device common; +- struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE]; ++ struct device_dma_parameters dma_parms; ++ struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; ++ bool has_sg; ++ bool mcdma; + u32 flush_on_fsync; ++ u32 max_length; ++ enum xilinx_dma_halt_mode halt_mode; + bool ext_addr; + struct platform_device *pdev; + const struct xilinx_dma_config *dma_config; +@@ -508,19 +459,21 @@ struct xilinx_dma_device { + struct clk *txs_clk; + struct clk *rx_clk; + struct clk *rxs_clk; +- u32 s2mm_chan_id; +- u32 mm2s_chan_id; +- u32 max_buffer_len; ++ u32 nr_channels; ++ u32 chan_id; + }; + ++/* Forward Declarations */ ++static int xilinx_dma_reset(struct xilinx_dma_chan *chan); ++ + /* Macros */ + #define to_xilinx_chan(chan) \ + container_of(chan, struct xilinx_dma_chan, common) + #define to_dma_tx_descriptor(tx) \ + container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) + #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ +- readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \ +- val, cond, delay_us, timeout_us) ++ readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ ++ cond, delay_us, timeout_us) + + /* IO accessors */ + static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) +@@ -611,18 +564,6 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + } + } + +-static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan, +- struct xilinx_aximcdma_desc_hw *hw, +- dma_addr_t buf_addr, size_t sg_used) +-{ +- if (chan->ext_addr) { +- hw->buf_addr = lower_32_bits(buf_addr + sg_used); +- hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used); +- } else { +- hw->buf_addr = buf_addr + sg_used; +- } +-} +- + /* ----------------------------------------------------------------------------- + * Descriptors and segments alloc and free + */ +@@ -690,33 +631,6 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) + } + 
spin_unlock_irqrestore(&chan->lock, flags); + +- if (!segment) +- dev_dbg(chan->dev, "Could not find free tx segment\n"); +- +- return segment; +-} +- +-/** +- * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment +- * @chan: Driver specific DMA channel +- * +- * Return: The allocated segment on success and NULL on failure. +- */ +-static struct xilinx_aximcdma_tx_segment * +-xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +-{ +- struct xilinx_aximcdma_tx_segment *segment = NULL; +- unsigned long flags; +- +- spin_lock_irqsave(&chan->lock, flags); +- if (!list_empty(&chan->free_seg_list)) { +- segment = list_first_entry(&chan->free_seg_list, +- struct xilinx_aximcdma_tx_segment, +- node); +- list_del(&segment->node); +- } +- spin_unlock_irqrestore(&chan->lock, flags); +- + return segment; + } + +@@ -731,17 +645,6 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) + hw->next_desc_msb = next_desc_msb; + } + +-static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) +-{ +- u32 next_desc = hw->next_desc; +- u32 next_desc_msb = hw->next_desc_msb; +- +- memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); +- +- hw->next_desc = next_desc; +- hw->next_desc_msb = next_desc_msb; +-} +- + /** + * xilinx_dma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel +@@ -755,20 +658,6 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, + list_add_tail(&segment->node, &chan->free_seg_list); + } + +-/** +- * xilinx_mcdma_free_tx_segment - Free transaction segment +- * @chan: Driver specific DMA channel +- * @segment: DMA transaction segment +- */ +-static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, +- struct xilinx_aximcdma_tx_segment * +- segment) +-{ +- xilinx_mcdma_clean_hw_desc(&segment->hw); +- +- list_add_tail(&segment->node, &chan->free_seg_list); +-} +- + /** + * xilinx_cdma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel +@@ -792,7 +681,7 @@ static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, + } + + /** +- * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor ++ * xilinx_dma_tx_descriptor - Allocate transaction descriptor + * @chan: Driver specific DMA channel + * + * Return: The allocated descriptor on success and NULL on failure. 
+@@ -802,7 +691,7 @@ xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) + { + struct xilinx_dma_tx_descriptor *desc; + +- desc = kzalloc(sizeof(*desc), GFP_NOWAIT); ++ desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return NULL; + +@@ -823,7 +712,6 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, + struct xilinx_vdma_tx_segment *segment, *next; + struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; + struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; +- struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; + + if (!desc) + return; +@@ -839,18 +727,12 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, + list_del(&cdma_segment->node); + xilinx_cdma_free_tx_segment(chan, cdma_segment); + } +- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ } else { + list_for_each_entry_safe(axidma_segment, axidma_next, + &desc->segments, node) { + list_del(&axidma_segment->node); + xilinx_dma_free_tx_segment(chan, axidma_segment); + } +- } else { +- list_for_each_entry_safe(aximcdma_segment, aximcdma_next, +- &desc->segments, node) { +- list_del(&aximcdma_segment->node); +- xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); +- } + } + + kfree(desc); +@@ -874,6 +756,30 @@ static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, + } + } + ++/** ++ * xilinx_dma_init_segs - Initialize the segment entries ++ * @chan: Driver specific DMA channel ++ * ++ * Must be called with lock held or during channel allocation ++ */ ++static void xilinx_dma_init_segs(struct xilinx_dma_chan *chan) ++{ ++ int i; ++ ++ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { ++ chan->seg_v[i].hw.next_desc = ++ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * ++ ((i + 1) % XILINX_DMA_NUM_DESCS)); ++ chan->seg_v[i].hw.next_desc_msb = ++ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * ++ ((i + 1) % XILINX_DMA_NUM_DESCS)); ++ chan->seg_v[i].phys = chan->seg_p + ++ sizeof(*chan->seg_v) * i; ++ list_add_tail(&chan->seg_v[i].node, ++ &chan->free_seg_list); ++ } ++} ++ + /** + * xilinx_dma_free_descriptors - Free channel descriptors + * @chan: Driver specific DMA channel +@@ -888,6 +794,13 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) + xilinx_dma_free_desc_list(chan, &chan->done_list); + xilinx_dma_free_desc_list(chan, &chan->active_list); + ++ /* Reset the free list to ensure the ordering is correct */ ++ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ INIT_LIST_HEAD(&chan->free_seg_list); ++ xilinx_dma_init_segs(chan); ++ } ++ ++ + spin_unlock_irqrestore(&chan->lock, flags); + } + +@@ -909,83 +822,15 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) + INIT_LIST_HEAD(&chan->free_seg_list); + spin_unlock_irqrestore(&chan->lock, flags); + +- /* Free memory that is allocated for BD */ +- dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * +- XILINX_DMA_NUM_DESCS, chan->seg_v, +- chan->seg_p); +- + /* Free Memory that is allocated for cyclic DMA Mode */ + dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), + chan->cyclic_seg_v, chan->cyclic_seg_p); + } + +- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { +- spin_lock_irqsave(&chan->lock, flags); +- INIT_LIST_HEAD(&chan->free_seg_list); +- spin_unlock_irqrestore(&chan->lock, flags); +- +- /* Free memory that is allocated for BD */ +- dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * +- XILINX_DMA_NUM_DESCS, chan->seg_mv, +- chan->seg_p); +- } +- +- if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && 
+- chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { ++ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; + } +- +-} +- +-/** +- * xilinx_dma_get_residue - Compute residue for a given descriptor +- * @chan: Driver specific dma channel +- * @desc: dma transaction descriptor +- * +- * Return: The number of residue bytes for the descriptor. +- */ +-static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, +- struct xilinx_dma_tx_descriptor *desc) +-{ +- struct xilinx_cdma_tx_segment *cdma_seg; +- struct xilinx_axidma_tx_segment *axidma_seg; +- struct xilinx_aximcdma_tx_segment *aximcdma_seg; +- struct xilinx_cdma_desc_hw *cdma_hw; +- struct xilinx_axidma_desc_hw *axidma_hw; +- struct xilinx_aximcdma_desc_hw *aximcdma_hw; +- struct list_head *entry; +- u32 residue = 0; +- +- list_for_each(entry, &desc->segments) { +- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { +- cdma_seg = list_entry(entry, +- struct xilinx_cdma_tx_segment, +- node); +- cdma_hw = &cdma_seg->hw; +- residue += (cdma_hw->control - cdma_hw->status) & +- chan->xdev->max_buffer_len; +- } else if (chan->xdev->dma_config->dmatype == +- XDMA_TYPE_AXIDMA) { +- axidma_seg = list_entry(entry, +- struct xilinx_axidma_tx_segment, +- node); +- axidma_hw = &axidma_seg->hw; +- residue += (axidma_hw->control - axidma_hw->status) & +- chan->xdev->max_buffer_len; +- } else { +- aximcdma_seg = +- list_entry(entry, +- struct xilinx_aximcdma_tx_segment, +- node); +- aximcdma_hw = &aximcdma_seg->hw; +- residue += +- (aximcdma_hw->control - aximcdma_hw->status) & +- chan->xdev->max_buffer_len; +- } +- } +- +- return residue; + } + + /** +@@ -998,12 +843,14 @@ static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) + { +- struct dmaengine_desc_callback cb; ++ dma_async_tx_callback callback; ++ void *callback_param; + +- dmaengine_desc_get_callback(&desc->async_tx, &cb); +- if (dmaengine_desc_callback_valid(&cb)) { ++ callback = desc->async_tx.callback; ++ callback_param = desc->async_tx.callback_param; ++ if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); +- dmaengine_desc_callback_invoke(&cb, NULL); ++ callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } + } +@@ -1020,7 +867,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) + spin_lock_irqsave(&chan->lock, flags); + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { +- struct dmaengine_result result; ++ struct dmaengine_desc_callback cb; + + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); +@@ -1030,32 +877,17 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) + /* Remove from the list of running transactions */ + list_del(&desc->node); + +- if (unlikely(desc->err)) { +- if (chan->direction == DMA_DEV_TO_MEM) +- result.result = DMA_TRANS_READ_FAILED; +- else +- result.result = DMA_TRANS_WRITE_FAILED; +- } else { +- result.result = DMA_TRANS_NOERROR; +- } +- +- result.residue = desc->residue; +- + /* Run the link descriptor callback function */ +- spin_unlock_irqrestore(&chan->lock, flags); +- dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); +- spin_lock_irqsave(&chan->lock, flags); ++ dmaengine_desc_get_callback(&desc->async_tx, &cb); ++ if (dmaengine_desc_callback_valid(&cb)) { ++ spin_unlock_irqrestore(&chan->lock, flags); ++ dmaengine_desc_callback_invoke(&cb, NULL); ++ 
spin_lock_irqsave(&chan->lock, flags); ++ } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + xilinx_dma_free_tx_descriptor(chan, desc); +- +- /* +- * While we ran a callback the user called a terminate function, +- * which takes care of cleaning up any remaining descriptors +- */ +- if (chan->terminating) +- break; + } + + spin_unlock_irqrestore(&chan->lock, flags); +@@ -1063,11 +895,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) + + /** + * xilinx_dma_do_tasklet - Schedule completion tasklet +- * @t: Pointer to the Xilinx DMA channel structure ++ * @data: Pointer to the Xilinx DMA channel structure + */ +-static void xilinx_dma_do_tasklet(struct tasklet_struct *t) ++static void xilinx_dma_do_tasklet(unsigned long data) + { +- struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet); ++ struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; + + xilinx_dma_chan_desc_cleanup(chan); + } +@@ -1081,7 +913,6 @@ static void xilinx_dma_do_tasklet(struct tasklet_struct *t) + static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) + { + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); +- int i; + + /* Has this channel already been allocated? */ + if (chan->desc_pool) +@@ -1093,71 +924,19 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) + */ + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + /* Allocate the buffer descriptors. */ +- chan->seg_v = dma_alloc_coherent(chan->dev, +- sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, +- &chan->seg_p, GFP_KERNEL); +- if (!chan->seg_v) { +- dev_err(chan->dev, +- "unable to allocate channel %d descriptors\n", +- chan->id); +- return -ENOMEM; +- } +- /* +- * For cyclic DMA mode we need to program the tail Descriptor +- * register with a value which is not a part of the BD chain +- * so allocating a desc segment during channel allocation for +- * programming tail descriptor. +- */ +- chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, +- sizeof(*chan->cyclic_seg_v), +- &chan->cyclic_seg_p, +- GFP_KERNEL); +- if (!chan->cyclic_seg_v) { +- dev_err(chan->dev, +- "unable to allocate desc segment for cyclic DMA\n"); +- dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * +- XILINX_DMA_NUM_DESCS, chan->seg_v, +- chan->seg_p); +- return -ENOMEM; +- } +- chan->cyclic_seg_v->phys = chan->cyclic_seg_p; +- +- for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { +- chan->seg_v[i].hw.next_desc = +- lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * +- ((i + 1) % XILINX_DMA_NUM_DESCS)); +- chan->seg_v[i].hw.next_desc_msb = +- upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * +- ((i + 1) % XILINX_DMA_NUM_DESCS)); +- chan->seg_v[i].phys = chan->seg_p + +- sizeof(*chan->seg_v) * i; +- list_add_tail(&chan->seg_v[i].node, +- &chan->free_seg_list); +- } +- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { +- /* Allocate the buffer descriptors. 
*/ +- chan->seg_mv = dma_alloc_coherent(chan->dev, +- sizeof(*chan->seg_mv) * ++ chan->seg_v = dma_zalloc_coherent(chan->dev, ++ sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); +- if (!chan->seg_mv) { ++ if (!chan->seg_v) { + dev_err(chan->dev, + "unable to allocate channel %d descriptors\n", + chan->id); + return -ENOMEM; + } +- for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { +- chan->seg_mv[i].hw.next_desc = +- lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * +- ((i + 1) % XILINX_DMA_NUM_DESCS)); +- chan->seg_mv[i].hw.next_desc_msb = +- upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * +- ((i + 1) % XILINX_DMA_NUM_DESCS)); +- chan->seg_mv[i].phys = chan->seg_p + +- sizeof(*chan->seg_mv) * i; +- list_add_tail(&chan->seg_mv[i].node, +- &chan->free_seg_list); +- } ++ ++ xilinx_dma_init_segs(chan); ++ + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", + chan->dev, +@@ -1173,14 +952,31 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) + } + + if (!chan->desc_pool && +- ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && +- chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { ++ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { + dev_err(chan->dev, + "unable to allocate channel %d descriptor pool\n", + chan->id); + return -ENOMEM; + } + ++ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ /* ++ * For cyclic DMA mode we need to program the tail Descriptor ++ * register with a value which is not a part of the BD chain ++ * so allocating a desc segment during channel allocation for ++ * programming tail descriptor. ++ */ ++ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, ++ sizeof(*chan->cyclic_seg_v), ++ &chan->cyclic_seg_p, GFP_KERNEL); ++ if (!chan->cyclic_seg_v) { ++ dev_err(chan->dev, ++ "unable to allocate desc segment for cyclic DMA\n"); ++ return -ENOMEM; ++ } ++ chan->cyclic_seg_v->phys = chan->cyclic_seg_p; ++ } ++ + dma_cookie_init(dchan); + + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { +@@ -1198,34 +994,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) + return 0; + } + +-/** +- * xilinx_dma_calc_copysize - Calculate the amount of data to copy +- * @chan: Driver specific DMA channel +- * @size: Total data that needs to be copied +- * @done: Amount of data that has been already copied +- * +- * Return: Amount of data that has to be copied +- */ +-static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, +- int size, int done) +-{ +- size_t copy; +- +- copy = min_t(size_t, size - done, +- chan->xdev->max_buffer_len); +- +- if ((copy + done < size) && +- chan->xdev->common.copy_align) { +- /* +- * If this is not the last descriptor, make sure +- * the next one will be properly aligned +- */ +- copy = rounddown(copy, +- (1 << chan->xdev->common.copy_align)); +- } +- return copy; +-} +- + /** + * xilinx_dma_tx_status - Get DMA transaction status + * @dchan: DMA channel +@@ -1240,6 +1008,8 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, + { + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; ++ struct xilinx_axidma_tx_segment *segment; ++ struct xilinx_axidma_desc_hw *hw; + enum dma_status ret; + unsigned long flags; + u32 residue = 0; +@@ -1248,55 +1018,74 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, + if (ret == DMA_COMPLETE || !txstate) + return ret; + +- spin_lock_irqsave(&chan->lock, flags); 
+- if (!list_empty(&chan->active_list)) { ++ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ spin_lock_irqsave(&chan->lock, flags); ++ + desc = list_last_entry(&chan->active_list, + struct xilinx_dma_tx_descriptor, node); +- /* +- * VDMA and simple mode do not support residue reporting, so the +- * residue field will always be 0. +- */ +- if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) +- residue = xilinx_dma_get_residue(chan, desc); +- } +- spin_unlock_irqrestore(&chan->lock, flags); ++ if (chan->has_sg) { ++ list_for_each_entry(segment, &desc->segments, node) { ++ hw = &segment->hw; ++ residue += (hw->control - hw->status) & ++ XILINX_DMA_MAX_TRANS_LEN; ++ } ++ } ++ spin_unlock_irqrestore(&chan->lock, flags); + +- dma_set_residue(txstate, residue); ++ chan->residue = residue; ++ dma_set_residue(txstate, chan->residue); ++ } + + return ret; + } + + /** +- * xilinx_dma_stop_transfer - Halt DMA channel ++ * xilinx_dma_halt - Halt DMA channel + * @chan: Driver specific DMA channel +- * +- * Return: '0' on success and failure value on error + */ +-static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) ++static void xilinx_dma_halt(struct xilinx_dma_chan *chan) + { +- u32 val; ++ int err, do_retry = 0; ++ u32 val, ctrl_reg; + +- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); ++ /* Preserve the ctrl reg state if we need to reset */ ++ ctrl_reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); ++ ctrl_reg &= ~XILINX_DMA_DMACR_RUNSTOP; + +- /* Wait for the hardware to halt */ +- return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, +- val & XILINX_DMA_DMASR_HALTED, 0, +- XILINX_DMA_LOOP_COUNT); +-} ++ if (chan->xdev->halt_mode == XILINX_DMA_HALT_MODE_RESET_ALWAYS) { ++ xilinx_dma_reset(chan); ++ dev_dbg(chan->dev, "Forcing reset of channel %p\n", chan); ++ } + +-/** +- * xilinx_cdma_stop_transfer - Wait for the current transfer to complete +- * @chan: Driver specific DMA channel +- * +- * Return: '0' on success and failure value on error +- */ +-static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) +-{ +- u32 val; ++ do { ++ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); + +- return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, +- val & XILINX_DMA_DMASR_IDLE, 0, +- XILINX_DMA_LOOP_COUNT); ++ /* Wait for the hardware to halt */ ++ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, ++ (val & XILINX_DMA_DMASR_HALTED), 0, ++ XILINX_DMA_LOOP_COUNT); ++ ++ if (err && !do_retry && ++ (chan->xdev->halt_mode == XILINX_DMA_HALT_MODE_RESET_RETRY || ++ chan->xdev->halt_mode == XILINX_DMA_HALT_MODE_RESET_ALWAYS)) { ++ /* Reset the DMA engine if the halt attempt failed */ ++ xilinx_dma_reset(chan); ++ do_retry = 1; ++ dev_dbg(chan->dev, "Failed to halt channel %p, resetting\n", ++ chan); ++ } else { ++ /* Do not retry more than once */ ++ do_retry = 0; ++ } ++ ++ } while (do_retry); ++ ++ if (err) { ++ dev_err(chan->dev, "Cannot stop channel %p: %x\n", ++ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); ++ chan->err = true; ++ } ++ chan->idle = true; + } + + /** +@@ -1330,10 +1119,9 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan) + static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) + { + struct xilinx_vdma_config *config = &chan->config; +- struct xilinx_dma_tx_descriptor *desc; ++ struct xilinx_dma_tx_descriptor *desc, *tail_desc; + u32 reg, j; +- struct xilinx_vdma_tx_segment *segment, *last = NULL; +- int i = 0; ++ struct xilinx_vdma_tx_segment *tail_segment; 
+ + /* This function was invoked with lock held */ + if (chan->err) +@@ -1347,16 +1135,21 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) + + desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); ++ tail_desc = list_last_entry(&chan->pending_list, ++ struct xilinx_dma_tx_descriptor, node); + +- /* Configure the hardware using info in the config structure */ +- if (chan->has_vflip) { +- reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); +- reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; +- reg |= config->vflip_en; +- dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, +- reg); +- } ++ tail_segment = list_last_entry(&tail_desc->segments, ++ struct xilinx_vdma_tx_segment, node); + ++ /* ++ * If hardware is idle, then all descriptors on the running lists are ++ * done, start new transfers ++ */ ++ if (chan->has_sg) ++ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, ++ desc->async_tx.phys); ++ ++ /* Configure the hardware using info in the config structure */ + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + + if (config->frm_cnt_en) +@@ -1364,11 +1157,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) + else + reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; + +- /* If not parking, enable circular mode */ ++ /* ++ * With SG, start with circular mode, so that BDs can be fetched. ++ * In direct register mode, if not parking, enable circular mode ++ */ ++ if (chan->has_sg || !config->park) ++ reg |= XILINX_DMA_DMACR_CIRC_EN; ++ + if (config->park) + reg &= ~XILINX_DMA_DMACR_CIRC_EN; +- else +- reg |= XILINX_DMA_DMACR_CIRC_EN; + + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + +@@ -1390,37 +1187,48 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) + return; + + /* Start the transfer */ +- if (chan->desc_submitcount < chan->num_frms) +- i = chan->desc_submitcount; +- +- list_for_each_entry(segment, &desc->segments, node) { +- if (chan->ext_addr) +- vdma_desc_write_64(chan, +- XILINX_VDMA_REG_START_ADDRESS_64(i++), +- segment->hw.buf_addr, +- segment->hw.buf_addr_msb); +- else +- vdma_desc_write(chan, +- XILINX_VDMA_REG_START_ADDRESS(i++), +- segment->hw.buf_addr); ++ if (chan->has_sg) { ++ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, ++ tail_segment->phys); ++ list_splice_tail_init(&chan->pending_list, &chan->active_list); ++ chan->desc_pendingcount = 0; ++ } else { ++ struct xilinx_vdma_tx_segment *segment, *last = NULL; ++ int i = 0; ++ ++ if (chan->desc_submitcount < chan->num_frms) ++ i = chan->desc_submitcount; ++ ++ list_for_each_entry(segment, &desc->segments, node) { ++ if (chan->ext_addr) ++ vdma_desc_write_64(chan, ++ XILINX_VDMA_REG_START_ADDRESS_64(i++), ++ segment->hw.buf_addr, ++ segment->hw.buf_addr_msb); ++ else ++ vdma_desc_write(chan, ++ XILINX_VDMA_REG_START_ADDRESS(i++), ++ segment->hw.buf_addr); + +- last = segment; +- } ++ last = segment; ++ } + +- if (!last) +- return; ++ if (!last) ++ return; + +- /* HW expects these parameters to be same for one transaction */ +- vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); +- vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, +- last->hw.stride); +- vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); ++ /* HW expects these parameters to be same for one transaction */ ++ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); ++ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, ++ last->hw.stride); ++ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); + +- chan->desc_submitcount++; +- 
chan->desc_pendingcount--; +- list_move_tail(&desc->node, &chan->active_list); +- if (chan->desc_submitcount == chan->num_frms) +- chan->desc_submitcount = 0; ++ chan->desc_submitcount++; ++ chan->desc_pendingcount--; ++ list_del(&desc->node); ++ list_add_tail(&desc->node, &chan->active_list); ++ if (chan->desc_submitcount == chan->num_frms) ++ chan->desc_submitcount = 0; ++ } + + chan->idle = false; + } +@@ -1451,7 +1259,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_cdma_tx_segment, node); + +- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { ++ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX && ++ !chan->no_coalesce) { + ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; + ctrl_reg |= chan->desc_pendingcount << + XILINX_DMA_CR_COALESCE_SHIFT; +@@ -1459,12 +1268,6 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) + } + + if (chan->has_sg) { +- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, +- XILINX_CDMA_CR_SGMODE); +- +- dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, +- XILINX_CDMA_CR_SGMODE); +- + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + +@@ -1473,97 +1276,21 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) + tail_segment->phys); + } else { + /* In simple mode */ +- struct xilinx_cdma_tx_segment *segment; +- struct xilinx_cdma_desc_hw *hw; +- +- segment = list_first_entry(&head_desc->segments, +- struct xilinx_cdma_tx_segment, +- node); +- +- hw = &segment->hw; +- +- xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, +- xilinx_prep_dma_addr_t(hw->src_addr)); +- xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, +- xilinx_prep_dma_addr_t(hw->dest_addr)); +- +- /* Start the transfer */ +- dma_ctrl_write(chan, XILINX_DMA_REG_BTT, +- hw->control & chan->xdev->max_buffer_len); +- } +- +- list_splice_tail_init(&chan->pending_list, &chan->active_list); +- chan->desc_pendingcount = 0; +- chan->idle = false; +-} +- +-/** +- * xilinx_dma_start_transfer - Starts DMA transfer +- * @chan: Driver specific channel struct pointer +- */ +-static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) +-{ +- struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; +- struct xilinx_axidma_tx_segment *tail_segment; +- u32 reg; +- +- if (chan->err) +- return; +- +- if (list_empty(&chan->pending_list)) +- return; +- +- if (!chan->idle) +- return; +- +- head_desc = list_first_entry(&chan->pending_list, +- struct xilinx_dma_tx_descriptor, node); +- tail_desc = list_last_entry(&chan->pending_list, +- struct xilinx_dma_tx_descriptor, node); +- tail_segment = list_last_entry(&tail_desc->segments, +- struct xilinx_axidma_tx_segment, node); +- +- reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); +- +- if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { +- reg &= ~XILINX_DMA_CR_COALESCE_MAX; +- reg |= chan->desc_pendingcount << +- XILINX_DMA_CR_COALESCE_SHIFT; +- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); +- } +- +- if (chan->has_sg) +- xilinx_write(chan, XILINX_DMA_REG_CURDESC, +- head_desc->async_tx.phys); +- +- xilinx_dma_start(chan); +- +- if (chan->err) +- return; +- +- /* Start the transfer */ +- if (chan->has_sg) { +- if (chan->cyclic) +- xilinx_write(chan, XILINX_DMA_REG_TAILDESC, +- chan->cyclic_seg_v->phys); +- else +- xilinx_write(chan, XILINX_DMA_REG_TAILDESC, +- tail_segment->phys); +- } else { +- struct xilinx_axidma_tx_segment *segment; +- struct xilinx_axidma_desc_hw *hw; ++ struct xilinx_cdma_tx_segment *segment; ++ struct 
xilinx_cdma_desc_hw *hw; + + segment = list_first_entry(&head_desc->segments, +- struct xilinx_axidma_tx_segment, ++ struct xilinx_cdma_tx_segment, + node); ++ + hw = &segment->hw; + +- xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, +- xilinx_prep_dma_addr_t(hw->buf_addr)); ++ xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); ++ xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + + /* Start the transfer */ + dma_ctrl_write(chan, XILINX_DMA_REG_BTT, +- hw->control & chan->xdev->max_buffer_len); ++ hw->control & XILINX_DMA_MAX_TRANS_LEN); + } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); +@@ -1572,20 +1299,15 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) + } + + /** +- * xilinx_mcdma_start_transfer - Starts MCDMA transfer ++ * xilinx_dma_start_transfer - Starts DMA transfer + * @chan: Driver specific channel struct pointer + */ +-static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) ++static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) + { + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; +- struct xilinx_aximcdma_tx_segment *tail_segment; ++ struct xilinx_axidma_tx_segment *tail_segment; + u32 reg; + +- /* +- * lock has been held by calling functions, so we don't need it +- * to take it here again. +- */ +- + if (chan->err) + return; + +@@ -1600,32 +1322,37 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, +- struct xilinx_aximcdma_tx_segment, node); ++ struct xilinx_axidma_tx_segment, node); + +- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); ++ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + +- if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { +- reg &= ~XILINX_MCDMA_COALESCE_MASK; ++ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX && ++ !chan->no_coalesce) { ++ reg &= ~XILINX_DMA_CR_COALESCE_MAX; + reg |= chan->desc_pendingcount << +- XILINX_MCDMA_COALESCE_SHIFT; ++ XILINX_DMA_CR_COALESCE_SHIFT; ++ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + } + +- reg |= XILINX_MCDMA_IRQ_ALL_MASK; +- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); +- +- /* Program current descriptor */ +- xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), +- head_desc->async_tx.phys); +- +- /* Program channel enable register */ +- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); +- reg |= BIT(chan->tdest); +- dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); ++ if (chan->has_sg && !chan->xdev->mcdma) ++ xilinx_write(chan, XILINX_DMA_REG_CURDESC, ++ head_desc->async_tx.phys); + +- /* Start the fetch of BDs for the channel */ +- reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); +- reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; +- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); ++ if (chan->has_sg && chan->xdev->mcdma) { ++ if (chan->direction == DMA_MEM_TO_DEV) { ++ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, ++ head_desc->async_tx.phys); ++ } else { ++ if (!chan->tdest) { ++ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, ++ head_desc->async_tx.phys); ++ } else { ++ dma_ctrl_write(chan, ++ XILINX_DMA_MCRX_CDESC(chan->tdest), ++ head_desc->async_tx.phys); ++ } ++ } ++ } + + xilinx_dma_start(chan); + +@@ -1633,8 +1360,42 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) + return; + + /* Start the transfer */ +- 
xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), +- tail_segment->phys); ++ if (chan->has_sg && !chan->xdev->mcdma) { ++ if (chan->cyclic) ++ xilinx_write(chan, XILINX_DMA_REG_TAILDESC, ++ chan->cyclic_seg_v->phys); ++ else ++ xilinx_write(chan, XILINX_DMA_REG_TAILDESC, ++ tail_segment->phys); ++ } else if (chan->has_sg && chan->xdev->mcdma) { ++ if (chan->direction == DMA_MEM_TO_DEV) { ++ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, ++ tail_segment->phys); ++ } else { ++ if (!chan->tdest) { ++ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, ++ tail_segment->phys); ++ } else { ++ dma_ctrl_write(chan, ++ XILINX_DMA_MCRX_TDESC(chan->tdest), ++ tail_segment->phys); ++ } ++ } ++ } else { ++ struct xilinx_axidma_tx_segment *segment; ++ struct xilinx_axidma_desc_hw *hw; ++ ++ segment = list_first_entry(&head_desc->segments, ++ struct xilinx_axidma_tx_segment, ++ node); ++ hw = &segment->hw; ++ ++ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); ++ ++ /* Start the transfer */ ++ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, ++ hw->control & XILINX_DMA_MAX_TRANS_LEN); ++ } + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; +@@ -1655,17 +1416,6 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan) + spin_unlock_irqrestore(&chan->lock, flags); + } + +-/** +- * xilinx_dma_device_config - Configure the DMA channel +- * @dchan: DMA channel +- * @config: channel configuration +- */ +-static int xilinx_dma_device_config(struct dma_chan *dchan, +- struct dma_slave_config *config) +-{ +- return 0; +-} +- + /** + * xilinx_dma_complete_descriptor - Mark the active descriptor as complete + * @chan : xilinx DMA channel +@@ -1675,18 +1425,34 @@ static int xilinx_dma_device_config(struct dma_chan *dchan, + static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) + { + struct xilinx_dma_tx_descriptor *desc, *next; ++ struct xilinx_axidma_tx_segment *segment; ++ bool desc_done; + + /* This function was invoked with lock held */ + if (list_empty(&chan->active_list)) + return; + + list_for_each_entry_safe(desc, next, &chan->active_list, node) { +- if (chan->has_sg && chan->xdev->dma_config->dmatype != +- XDMA_TYPE_VDMA) +- desc->residue = xilinx_dma_get_residue(chan, desc); +- else +- desc->residue = 0; +- desc->err = chan->err; ++ if (chan->no_coalesce) { ++ /* Level interrupts may effectively coalesce completions */ ++ segment = list_last_entry(&desc->segments, ++ struct xilinx_axidma_tx_segment, node); ++ if (!(segment->hw.status & XILINX_DMA_BD_CMPL)) { ++ desc_done = false; ++ if (chan->direction == DMA_DEV_TO_MEM) { ++ /* check for partial transfer completion */ ++ list_for_each_entry(segment, &desc->segments, node) { ++ if ((segment->hw.status & XILINX_DMA_BD_CMPL) && ++ (segment->hw.status & XILINX_DMA_BD_EOP)) { ++ desc_done = true; ++ break; ++ } ++ } ++ } ++ if (!desc_done) ++ return; ++ } ++ } + + list_del(&desc->node); + if (!desc->cyclic) +@@ -1722,7 +1488,6 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan) + + chan->err = false; + chan->idle = true; +- chan->desc_pendingcount = 0; + chan->desc_submitcount = 0; + + return err; +@@ -1750,74 +1515,6 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) + return 0; + } + +-/** +- * xilinx_mcdma_irq_handler - MCDMA Interrupt handler +- * @irq: IRQ number +- * @data: Pointer to the Xilinx MCDMA channel structure +- * +- * Return: IRQ_HANDLED/IRQ_NONE +- */ +-static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) +-{ +- struct xilinx_dma_chan 
*chan = data; +- u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; +- +- if (chan->direction == DMA_DEV_TO_MEM) +- ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; +- else +- ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; +- +- /* Read the channel id raising the interrupt*/ +- chan_sermask = dma_ctrl_read(chan, ser_offset); +- chan_id = ffs(chan_sermask); +- +- if (!chan_id) +- return IRQ_NONE; +- +- if (chan->direction == DMA_DEV_TO_MEM) +- chan_offset = chan->xdev->dma_config->max_channels / 2; +- +- chan_offset = chan_offset + (chan_id - 1); +- chan = chan->xdev->chan[chan_offset]; +- /* Read the status and ack the interrupts. */ +- status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); +- if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) +- return IRQ_NONE; +- +- dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), +- status & XILINX_MCDMA_IRQ_ALL_MASK); +- +- if (status & XILINX_MCDMA_IRQ_ERR_MASK) { +- dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", +- chan, +- dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), +- dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET +- (chan->tdest)), +- dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET +- (chan->tdest))); +- chan->err = true; +- } +- +- if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { +- /* +- * Device takes too long to do the transfer when user requires +- * responsiveness. +- */ +- dev_dbg(chan->dev, "Inter-packet latency too long\n"); +- } +- +- if (status & XILINX_MCDMA_IRQ_IOC_MASK) { +- spin_lock(&chan->lock); +- xilinx_dma_complete_descriptor(chan); +- chan->idle = true; +- chan->start_transfer(chan); +- spin_unlock(&chan->lock); +- } +- +- tasklet_schedule(&chan->tasklet); +- return IRQ_HANDLED; +-} +- + /** + * xilinx_dma_irq_handler - DMA Interrupt handler + * @irq: IRQ number +@@ -1873,7 +1570,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) + if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { + spin_lock(&chan->lock); + xilinx_dma_complete_descriptor(chan); +- chan->idle = true; ++ if (list_empty(&chan->active_list)) ++ chan->idle = true; + chan->start_transfer(chan); + spin_unlock(&chan->lock); + } +@@ -1893,7 +1591,6 @@ static void append_desc_queue(struct xilinx_dma_chan *chan, + struct xilinx_vdma_tx_segment *tail_segment; + struct xilinx_dma_tx_descriptor *tail_desc; + struct xilinx_axidma_tx_segment *axidma_tail_segment; +- struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment; + struct xilinx_cdma_tx_segment *cdma_tail_segment; + + if (list_empty(&chan->pending_list)) +@@ -1915,17 +1612,11 @@ static void append_desc_queue(struct xilinx_dma_chan *chan, + struct xilinx_cdma_tx_segment, + node); + cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; +- } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ } else { + axidma_tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, + node); + axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; +- } else { +- aximcdma_tail_segment = +- list_last_entry(&tail_desc->segments, +- struct xilinx_aximcdma_tx_segment, +- node); +- aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; + } + + /* +@@ -1982,8 +1673,6 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) + if (desc->cyclic) + chan->cyclic = true; + +- chan->terminating = false; +- + spin_unlock_irqrestore(&chan->lock, flags); + + return cookie; +@@ -2005,7 +1694,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, + { + struct xilinx_dma_chan *chan = 
to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; +- struct xilinx_vdma_tx_segment *segment; ++ struct xilinx_vdma_tx_segment *segment, *prev = NULL; + struct xilinx_vdma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) +@@ -2059,6 +1748,8 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + ++ prev = segment; ++ + /* Link the last hardware descriptor with the first. */ + segment = list_first_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); +@@ -2087,10 +1778,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, + { + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; +- struct xilinx_cdma_tx_segment *segment; ++ struct xilinx_cdma_tx_segment *segment, *prev; + struct xilinx_cdma_desc_hw *hw; + +- if (!len || len > chan->xdev->max_buffer_len) ++ if (!len || len > XILINX_DMA_MAX_TRANS_LEN) + return NULL; + + desc = xilinx_dma_alloc_tx_descriptor(chan); +@@ -2114,11 +1805,21 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, + hw->dest_addr_msb = upper_32_bits(dma_dst); + } + ++ /* Fill the previous next descriptor with current */ ++ prev = list_last_entry(&desc->segments, ++ struct xilinx_cdma_tx_segment, node); ++ prev->hw.next_desc = segment->phys; ++ + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + ++ prev = segment; ++ ++ /* Link the last hardware descriptor with the first. */ ++ segment = list_first_entry(&desc->segments, ++ struct xilinx_cdma_tx_segment, node); + desc->async_tx.phys = segment->phys; +- hw->next_desc = segment->phys; ++ prev->hw.next_desc = segment->phys; + + return &desc->async_tx; + +@@ -2180,8 +1881,17 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ +- copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), +- sg_used); ++ copy = min_t(size_t, sg_dma_len(sg) - sg_used, ++ chan->xdev->max_length); ++ if ((copy + sg_used < sg_dma_len(sg)) && ++ chan->xdev->common.copy_align) { ++ /* If this is not the last descriptor, make sure ++ * the next one will be properly aligned ++ */ ++ copy = rounddown(copy, ++ (1 << chan->xdev->common.copy_align)); ++ } ++ + hw = &segment->hw; + + /* Fill in the descriptor */ +@@ -2285,8 +1995,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ +- copy = xilinx_dma_calc_copysize(chan, period_len, +- sg_used); ++ copy = min_t(size_t, period_len - sg_used, ++ XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); +@@ -2334,32 +2044,31 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + } + + /** +- * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction ++ * xilinx_dma_prep_interleaved - prepare a descriptor for a ++ * DMA_SLAVE transaction + * @dchan: DMA channel +- * @sgl: scatterlist to transfer to/from +- * @sg_len: number of entries in @scatterlist +- * @direction: DMA direction ++ * @xt: Interleaved template pointer + * @flags: transfer ack flags +- * @context: APP words of the descriptor + * + * Return: Async transaction descriptor on success and NULL on failure + */ + static struct 
dma_async_tx_descriptor * +-xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, +- unsigned int sg_len, +- enum dma_transfer_direction direction, +- unsigned long flags, void *context) ++xilinx_dma_prep_interleaved(struct dma_chan *dchan, ++ struct dma_interleaved_template *xt, ++ unsigned long flags) + { + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; +- struct xilinx_aximcdma_tx_segment *segment = NULL; +- u32 *app_w = (u32 *)context; +- struct scatterlist *sg; +- size_t copy; +- size_t sg_used; +- unsigned int i; ++ struct xilinx_axidma_tx_segment *segment; ++ struct xilinx_axidma_desc_hw *hw; + +- if (!is_slave_direction(direction)) ++ if (!is_slave_direction(xt->dir)) ++ return NULL; ++ ++ if (!xt->numf || !xt->sgl[0].size) ++ return NULL; ++ ++ if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ +@@ -2367,67 +2076,54 @@ xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + if (!desc) + return NULL; + ++ chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + +- /* Build transactions using information in the scatter gather list */ +- for_each_sg(sgl, sg, sg_len, i) { +- sg_used = 0; +- +- /* Loop until the entire scatterlist entry is used */ +- while (sg_used < sg_dma_len(sg)) { +- struct xilinx_aximcdma_desc_hw *hw; ++ /* Get a free segment */ ++ segment = xilinx_axidma_alloc_tx_segment(chan); ++ if (!segment) ++ goto error; + +- /* Get a free segment */ +- segment = xilinx_aximcdma_alloc_tx_segment(chan); +- if (!segment) +- goto error; ++ hw = &segment->hw; + +- /* +- * Calculate the maximum number of bytes to transfer, +- * making sure it is less than the hw limit +- */ +- copy = min_t(size_t, sg_dma_len(sg) - sg_used, +- chan->xdev->max_buffer_len); +- hw = &segment->hw; ++ /* Fill in the descriptor */ ++ if (xt->dir != DMA_MEM_TO_DEV) ++ hw->buf_addr = xt->dst_start; ++ else ++ hw->buf_addr = xt->src_start; + +- /* Fill in the descriptor */ +- xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), +- sg_used); +- hw->control = copy; ++ hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; ++ hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & ++ XILINX_DMA_BD_VSIZE_MASK; ++ hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & ++ XILINX_DMA_BD_STRIDE_MASK; ++ hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + +- if (chan->direction == DMA_MEM_TO_DEV && app_w) { +- memcpy(hw->app, app_w, sizeof(u32) * +- XILINX_DMA_NUM_APP_WORDS); +- } ++ /* ++ * Insert the segment into the descriptor segments ++ * list. ++ */ ++ list_add_tail(&segment->node, &desc->segments); + +- sg_used += copy; +- /* +- * Insert the segment into the descriptor segments +- * list. 
+- */ +- list_add_tail(&segment->node, &desc->segments); +- } +- } + + segment = list_first_entry(&desc->segments, +- struct xilinx_aximcdma_tx_segment, node); ++ struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ +- if (chan->direction == DMA_MEM_TO_DEV) { +- segment->hw.control |= XILINX_MCDMA_BD_SOP; ++ if (xt->dir == DMA_MEM_TO_DEV) { ++ segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, +- struct xilinx_aximcdma_tx_segment, ++ struct xilinx_axidma_tx_segment, + node); +- segment->hw.control |= XILINX_MCDMA_BD_EOP; ++ segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + + error: + xilinx_dma_free_tx_descriptor(chan, desc); +- + return NULL; + } + +@@ -2441,23 +2137,15 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) + { + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 reg; +- int err; + +- if (!chan->cyclic) { +- err = chan->stop_transfer(chan); +- if (err) { +- dev_err(chan->dev, "Cannot stop channel %p: %x\n", +- chan, dma_ctrl_read(chan, +- XILINX_DMA_REG_DMASR)); +- chan->err = true; +- } +- } ++ if (chan->cyclic) ++ xilinx_dma_chan_reset(chan); ++ ++ /* Halt the DMA engine */ ++ xilinx_dma_halt(chan); + +- xilinx_dma_chan_reset(chan); + /* Remove and free all of the descriptors in the lists */ +- chan->terminating = true; + xilinx_dma_free_descriptors(chan); +- chan->idle = true; + + if (chan->cyclic) { + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); +@@ -2466,22 +2154,11 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) + chan->cyclic = false; + } + +- if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) +- dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, +- XILINX_CDMA_CR_SGMODE); +- + return 0; + } + +-static void xilinx_dma_synchronize(struct dma_chan *dchan) +-{ +- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); +- +- tasklet_kill(&chan->tasklet); +-} +- + /** +- * xilinx_vdma_channel_set_config - Configure VDMA channel ++ * xilinx_dma_channel_set_config - Configure VDMA channel + * Run-time configuration for Axi VDMA, supports: + * . halt the channel + * . 
configure interrupt coalescing and inter-packet delay threshold +@@ -2511,16 +2188,12 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + chan->config.gen_lock = cfg->gen_lock; + chan->config.master = cfg->master; + +- dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; + if (cfg->gen_lock && chan->genlock) { + dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; +- dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; + dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; + } + + chan->config.frm_cnt_en = cfg->frm_cnt_en; +- chan->config.vflip_en = cfg->vflip_en; +- + if (cfg->park) + chan->config.park_frm = cfg->park_frm; + else +@@ -2530,13 +2203,11 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + chan->config.delay = cfg->delay; + + if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { +- dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; + dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; + chan->config.coalesc = cfg->coalesc; + } + + if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { +- dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; + dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; + chan->config.delay = cfg->delay; + } +@@ -2565,9 +2236,6 @@ static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, + XILINX_DMA_DMAXR_ALL_IRQ_MASK); + +- if (chan->irq > 0) +- free_irq(chan->irq, chan); +- + tasklet_kill(&chan->tasklet); + + list_del(&chan->common.device_node); +@@ -2582,8 +2250,11 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + *tmp_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); +- if (IS_ERR(*axi_clk)) +- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); ++ if (IS_ERR(*axi_clk)) { ++ err = PTR_ERR(*axi_clk); ++ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); ++ return err; ++ } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) +@@ -2599,25 +2270,25 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + + err = clk_prepare_enable(*axi_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_axiclk; + } + + err = clk_prepare_enable(*rx_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + goto err_disable_txclk; + } + + err = clk_prepare_enable(*sg_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); + goto err_disable_rxclk; + } + +@@ -2644,22 +2315,28 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + *tmp2_clk = NULL; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); +- if (IS_ERR(*axi_clk)) +- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); ++ if (IS_ERR(*axi_clk)) { ++ err = PTR_ERR(*axi_clk); ++ dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); ++ return err; ++ } + + *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); +- if (IS_ERR(*dev_clk)) +- return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n"); ++ if (IS_ERR(*dev_clk)) { ++ err = PTR_ERR(*dev_clk); ++ dev_err(&pdev->dev, "failed to get 
dev_clk (%u)\n", err); ++ return err; ++ } + + err = clk_prepare_enable(*axi_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*dev_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); + goto err_disable_axiclk; + } + +@@ -2678,8 +2355,11 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + int err; + + *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); +- if (IS_ERR(*axi_clk)) +- return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); ++ if (IS_ERR(*axi_clk)) { ++ err = PTR_ERR(*axi_clk); ++ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); ++ return err; ++ } + + *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); + if (IS_ERR(*tx_clk)) +@@ -2699,32 +2379,31 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, + + err = clk_prepare_enable(*axi_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", +- err); ++ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_axiclk; + } + + err = clk_prepare_enable(*txs_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); + goto err_disable_txclk; + } + + err = clk_prepare_enable(*rx_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + goto err_disable_txsclk; + } + + err = clk_prepare_enable(*rxs_clk); + if (err) { +- dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); ++ dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); + goto err_disable_rxclk; + } + +@@ -2758,15 +2437,16 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) + * + * @xdev: Driver specific device structure + * @node: Device node ++ * @chan_id: DMA Channel id + * + * Return: '0' on success and failure value on error + */ + static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, +- struct device_node *node) ++ struct device_node *node, int chan_id) + { + struct xilinx_dma_chan *chan; + bool has_dre = false; +- u32 value, width; ++ u32 value, chan_addr, width; + int err; + + /* Allocate and initialize the channel structure */ +@@ -2776,11 +2456,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + + chan->dev = xdev->dev; + chan->xdev = xdev; ++ chan->has_sg = xdev->has_sg; + chan->desc_pendingcount = 0x0; + chan->ext_addr = xdev->ext_addr; +- /* This variable ensures that descriptors are not +- * Submitted when dma engine is in progress. This variable is +- * Added to avoid polling for a bit in the status register to ++ /* This variable ensures that descriptors are not ++ * Submitted when dma engine is in progress. This variable is ++ * Added to avoid polling for a bit in the status register to + * Know dma state in the driver hot path. 
+ */ + chan->idle = true; +@@ -2796,6 +2477,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + + chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); + ++ /* The xlnx,no-coalesce property is optional */ ++ chan->no_coalesce = of_property_read_bool(node, "xlnx,no-coalesce"); ++ + err = of_property_read_u32(node, "xlnx,datawidth", &value); + if (err) { + dev_err(xdev->dev, "missing xlnx,datawidth property\n"); +@@ -2803,25 +2487,45 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + } + width = value >> 3; /* Convert bits to bytes */ + ++ /* find the IRQ line, if it exists in the device tree */ ++ chan->irq = of_irq_get(node, 0); ++ if (chan->irq < 0) ++ return chan->irq; ++ ++ err = of_property_read_u32(node, "reg", &chan_addr); ++ if (!err) { ++ /* Allow the DT to specify the channel indexing */ ++ if (chan_addr >= XILINX_DMA_MAX_CHANS_PER_DEVICE) { ++ dev_err(xdev->dev, "Invalid address for channel %s: %d\n", ++ node->name, chan_addr); ++ return -EINVAL; ++ } ++ if (xdev->chan[chan_addr] != NULL) { ++ dev_err(xdev->dev, "Duplicate address for channel %s: %d\n", ++ node->name, chan_addr); ++ return -EINVAL; ++ } ++ chan_id = chan_addr; ++ } + /* If data width is greater than 8 bytes, DRE is not in hw */ + if (width > 8) + has_dre = false; + + if (!has_dre) +- xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); ++ xdev->common.copy_align = fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { + chan->direction = DMA_MEM_TO_DEV; ++ chan->id = chan_id; ++ chan->tdest = chan_id; + xdev->common.directions = BIT(DMA_MEM_TO_DEV); +- chan->id = xdev->mm2s_chan_id++; +- chan->tdest = chan->id; + + chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { +- chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; + chan->config.park = 1; ++ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; + + if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) +@@ -2832,25 +2536,14 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { + chan->direction = DMA_DEV_TO_MEM; ++ chan->id = chan_id; ++ chan->tdest = chan_id - xdev->nr_channels; + xdev->common.directions |= BIT(DMA_DEV_TO_MEM); +- chan->id = xdev->s2mm_chan_id++; +- chan->tdest = chan->id - xdev->dma_config->max_channels / 2; +- chan->has_vflip = of_property_read_bool(node, +- "xlnx,enable-vert-flip"); +- if (chan->has_vflip) { +- chan->config.vflip_en = dma_read(chan, +- XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & +- XILINX_VDMA_ENABLE_VERTICAL_FLIP; +- } +- +- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) +- chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; +- else +- chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + ++ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { +- chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; + chan->config.park = 1; ++ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; + + if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) +@@ -2862,42 +2555,23 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + } + + /* Request the interrupt */ +- chan->irq = of_irq_get(node, chan->tdest); +- if (chan->irq < 0) +- return 
dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n"); +- err = request_irq(chan->irq, xdev->dma_config->irq_handler, +- IRQF_SHARED, "xilinx-dma-controller", chan); ++ err = devm_request_irq(chan->dev, chan->irq, xilinx_dma_irq_handler, ++ IRQF_SHARED, "xilinx-dma-controller", chan); + if (err) { + dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); + return err; + } + +- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + chan->start_transfer = xilinx_dma_start_transfer; +- chan->stop_transfer = xilinx_dma_stop_transfer; +- } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { +- chan->start_transfer = xilinx_mcdma_start_transfer; +- chan->stop_transfer = xilinx_dma_stop_transfer; +- } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { ++ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) + chan->start_transfer = xilinx_cdma_start_transfer; +- chan->stop_transfer = xilinx_cdma_stop_transfer; +- } else { ++ else + chan->start_transfer = xilinx_vdma_start_transfer; +- chan->stop_transfer = xilinx_dma_stop_transfer; +- } +- +- /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */ +- if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { +- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA || +- dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & +- XILINX_DMA_DMASR_SG_MASK) +- chan->has_sg = true; +- dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, +- chan->has_sg ? "enabled" : "disabled"); +- } + + /* Initialize the tasklet */ +- tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet); ++ tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, ++ (unsigned long)chan); + + /* + * Initialize the DMA channel and add it to the DMA engine channels +@@ -2926,24 +2600,24 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + * @xdev: Driver specific device structure + * @node: Device node + * +- * Return: '0' on success and failure value on error. ++ * Return: 0 always. 
+ */ + static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, +- struct device_node *node) +-{ +- int ret, i; +- u32 nr_channels = 1; ++ struct device_node *node) { ++ int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); +- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) ++ if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) { +- ret = xilinx_dma_chan_probe(xdev, node); ++ ret = xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + if (ret) + return ret; + } + ++ xdev->nr_channels += nr_channels; ++ + return 0; + } + +@@ -2960,7 +2634,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, + struct xilinx_dma_device *xdev = ofdma->of_dma_data; + int chan_id = dma_spec->args[0]; + +- if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id]) ++ if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) + return NULL; + + return dma_get_slave_channel(&xdev->chan[chan_id]->common); +@@ -2969,35 +2643,22 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, + static const struct xilinx_dma_config axidma_config = { + .dmatype = XDMA_TYPE_AXIDMA, + .clk_init = axidma_clk_init, +- .irq_handler = xilinx_dma_irq_handler, +- .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, + }; + +-static const struct xilinx_dma_config aximcdma_config = { +- .dmatype = XDMA_TYPE_AXIMCDMA, +- .clk_init = axidma_clk_init, +- .irq_handler = xilinx_mcdma_irq_handler, +- .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE, +-}; + static const struct xilinx_dma_config axicdma_config = { + .dmatype = XDMA_TYPE_CDMA, + .clk_init = axicdma_clk_init, +- .irq_handler = xilinx_dma_irq_handler, +- .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE, + }; + + static const struct xilinx_dma_config axivdma_config = { + .dmatype = XDMA_TYPE_VDMA, + .clk_init = axivdma_clk_init, +- .irq_handler = xilinx_dma_irq_handler, +- .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, + }; + + static const struct of_device_id xilinx_dma_of_ids[] = { + { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, + { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, + { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, +- { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, + {} + }; + MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); +@@ -3016,8 +2677,9 @@ static int xilinx_dma_probe(struct platform_device *pdev) + struct device_node *node = pdev->dev.of_node; + struct xilinx_dma_device *xdev; + struct device_node *child, *np = pdev->dev.of_node; ++ struct resource *io; + u32 num_frames, addr_width, len_width; +- int i, err; ++ int i, halt_mode, err; + + /* Allocate and initialize the DMA engine structure */ + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); +@@ -3041,31 +2703,25 @@ static int xilinx_dma_probe(struct platform_device *pdev) + return err; + + /* Request and map I/O memory */ +- xdev->regs = devm_platform_ioremap_resource(pdev, 0); +- if (IS_ERR(xdev->regs)) { +- err = PTR_ERR(xdev->regs); +- goto disable_clks; +- } ++ io = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ xdev->regs = devm_ioremap_resource(&pdev->dev, io); ++ if (IS_ERR(xdev->regs)) ++ return PTR_ERR(xdev->regs); ++ + /* Retrieve the DMA engine properties from the device tree */ +- xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); +- xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; +- 
+- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || +- xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { +- if (!of_property_read_u32(node, "xlnx,sg-length-width", +- &len_width)) { +- if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || +- len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { +- dev_warn(xdev->dev, +- "invalid xlnx,sg-length-width property value. Using default width\n"); +- } else { +- if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) +- dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); +- xdev->max_buffer_len = +- GENMASK(len_width - 1, 0); +- } +- } +- } ++ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); ++ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) ++ xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); ++ ++ xdev->halt_mode = XILINX_DMA_HALT_MODE_NORMAL; ++ halt_mode = of_property_match_string(node, ++ "xlnx,halt-mode", "reset-retry"); ++ if (halt_mode >= 0) ++ xdev->halt_mode = XILINX_DMA_HALT_MODE_RESET_RETRY; ++ halt_mode = of_property_match_string(node, ++ "xlnx,halt-mode", "reset-always"); ++ if (halt_mode >= 0) ++ xdev->halt_mode = XILINX_DMA_HALT_MODE_RESET_ALWAYS; + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { + err = of_property_read_u32(node, "xlnx,num-fstores", +@@ -3073,7 +2729,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) + if (err < 0) { + dev_err(xdev->dev, + "missing xlnx,num-fstores property\n"); +- goto disable_clks; ++ return err; + } + + err = of_property_read_u32(node, "xlnx,flush-fsync", +@@ -3083,6 +2739,16 @@ static int xilinx_dma_probe(struct platform_device *pdev) + "missing xlnx,flush-fsync property\n"); + } + ++ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { ++ err = of_property_read_u32(node, "xlnx,lenwidth", &len_width); ++ if (err < 0) { ++ len_width = XILINX_DMA_MAX_LEN_REG_WIDTH; ++ } ++ len_width = max_t(u32, len_width, XILINX_DMA_MIN_LEN_REG_WIDTH); ++ len_width = min_t(u32, len_width, XILINX_DMA_MAX_LEN_REG_WIDTH); ++ xdev->max_length = GENMASK(len_width-1, 0); ++ } ++ + err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); + if (err < 0) + dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); +@@ -3093,46 +2759,45 @@ static int xilinx_dma_probe(struct platform_device *pdev) + xdev->ext_addr = false; + + /* Set the dma mask bits */ +- err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); +- if (err < 0) { +- dev_err(xdev->dev, "DMA mask error %d\n", err); +- goto disable_clks; +- } ++ dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); + + /* Initialize the DMA engine */ + xdev->common.dev = &pdev->dev; + ++ pdev->dev.dma_parms = &xdev->dma_parms; ++ + INIT_LIST_HEAD(&xdev->common.channels); + if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { + dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); + } + ++ xdev->common.dst_addr_widths = BIT(addr_width / 8); ++ xdev->common.src_addr_widths = BIT(addr_width / 8); + xdev->common.device_alloc_chan_resources = + xilinx_dma_alloc_chan_resources; + xdev->common.device_free_chan_resources = + xilinx_dma_free_chan_resources; + xdev->common.device_terminate_all = xilinx_dma_terminate_all; +- xdev->common.device_synchronize = xilinx_dma_synchronize; + xdev->common.device_tx_status = xilinx_dma_tx_status; + xdev->common.device_issue_pending = xilinx_dma_issue_pending; +- xdev->common.device_config = xilinx_dma_device_config; + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); + 
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; +- /* Residue calculation is supported by only AXI DMA and CDMA */ ++ xdev->common.device_prep_interleaved_dma = ++ xilinx_dma_prep_interleaved; ++ /* Residue calculation is supported by only AXI DMA */ + xdev->common.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; ++ /* The DMA Driver breaks large descriptors into multiple ++ * segments ++ */ ++ dma_set_max_seg_size(xdev->common.dev, UINT_MAX); + } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); + xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; +- /* Residue calculation is supported by only AXI DMA and CDMA */ +- xdev->common.residue_granularity = +- DMA_RESIDUE_GRANULARITY_SEGMENT; +- } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { +- xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; + } else { + xdev->common.device_prep_interleaved_dma = + xilinx_vdma_dma_prep_interleaved; +@@ -3143,24 +2808,18 @@ static int xilinx_dma_probe(struct platform_device *pdev) + /* Initialize the channels */ + for_each_child_of_node(node, child) { + err = xilinx_dma_child_probe(xdev, child); +- if (err < 0) { +- of_node_put(child); +- goto error; +- } ++ if (err < 0) ++ goto disable_clks; + } + + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { +- for (i = 0; i < xdev->dma_config->max_channels; i++) ++ for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xdev->chan[i]->num_frms = num_frames; + } + + /* Register the DMA engine with the core */ +- err = dma_async_device_register(&xdev->common); +- if (err) { +- dev_err(xdev->dev, "failed to register the dma device\n"); +- goto error; +- } ++ dma_async_device_register(&xdev->common); + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); +@@ -3170,23 +2829,16 @@ static int xilinx_dma_probe(struct platform_device *pdev) + goto error; + } + +- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) +- dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); +- else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) +- dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); +- else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) +- dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); +- else +- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); ++ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); + + return 0; + ++disable_clks: ++ xdma_disable_allclks(xdev); + error: +- for (i = 0; i < xdev->dma_config->max_channels; i++) ++ for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); +-disable_clks: +- xdma_disable_allclks(xdev); + + return err; + } +@@ -3206,7 +2858,7 @@ static int xilinx_dma_remove(struct platform_device *pdev) + + dma_async_device_unregister(&xdev->common); + +- for (i = 0; i < xdev->dma_config->max_channels; i++) ++ for (i = 0; i < xdev->nr_channels; i++) + if (xdev->chan[i]) + xilinx_dma_chan_remove(xdev->chan[i]); + +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h +index 0ee20b764000..5982dbbd8afc 100644 +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -461,6 +461,13 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) + dev->dma_mask = &dev->coherent_dma_mask; + return dma_set_mask_and_coherent(dev, mask); + } ++static inline void 
*dma_zalloc_coherent(struct device *dev, size_t size, ++ dma_addr_t *dma_handle, gfp_t flag) ++{ ++ void *ret = dma_alloc_coherent(dev, size, dma_handle, ++ flag | __GFP_ZERO); ++ return ret; ++} + + /** + * dma_addressing_limited - return if the device is addressing limited +-- +2.34.1 + diff --git a/recipes-kernel/linux/linux-xlnx/adrv-conf.cfg b/recipes-kernel/linux/linux-xlnx/adrv-conf.cfg new file mode 100644 index 0000000..8c4d781 --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/adrv-conf.cfg @@ -0,0 +1,15 @@ +CONFIG_ADRV9001=y +CONFIG_ADRV9001_COMMON_VERBOSE=y +CONFIG_ADRV9001_ARM_VERBOSE=y +CONFIG_ADRV9001_VALIDATE_PARAMS=y +CONFIG_ADRV9009=y +CONFIG_AXI_JESD204_TX=y +CONFIG_AXI_JESD204_RX=y +CONFIG_XILINX_TRANSCEIVER=y +CONFIG_XILINX_JESD204B=y +CONFIG_XILINX_JESD204B_PHY=y +CONFIG_JESD204=y +CONFIG_JESD204_TOP_DEVICE=y +CONFIG_AXI_ADXCVR=y +CONFIG_AXI_JESD204B=y +CONFIG_XILINX_TRANSCEIVER=y diff --git a/recipes-kernel/linux/linux-xlnx/drivers.cfg b/recipes-kernel/linux/linux-xlnx/drivers.cfg new file mode 100644 index 0000000..21ee5f5 --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/drivers.cfg @@ -0,0 +1,61 @@ +CONFIG_AD_SIGMA_DELTA=y +CONFIG_AD7173=y +CONFIG_AD7266=y +CONFIG_AD7291=y +CONFIG_AD7298=y +CONFIG_AD7476=y +CONFIG_AD7791=y +CONFIG_AD7793=y +CONFIG_AD7887=y +CONFIG_AD799X=y +CONFIG_ADM1177=y +CONFIG_AD9361=y +CONFIG_AD6676=y +CONFIG_AD9467=y +CONFIG_ADMC=y +CONFIG_CF_AXI_TDD=y +CONFIG_AD8366=y +CONFIG_AD5064=y +CONFIG_AD5360=y +CONFIG_AD5380=y +CONFIG_AD5421=y +CONFIG_AD5446=y +CONFIG_AD5449=y +CONFIG_AD5592R_BASE=y +CONFIG_AD5592R=y +CONFIG_AD5593R=y +CONFIG_AD5504=y +CONFIG_AD5624R_SPI=y +CONFIG_AD5686=y +CONFIG_AD5686_SPI=y +CONFIG_AD5755=y +CONFIG_AD5764=y +CONFIG_AD5791=y +CONFIG_AD7303=y +CONFIG_AD9523=y +CONFIG_AD9528=y +CONFIG_AD9548=y +CONFIG_AD9517=y +CONFIG_CF_AXI_DDS=y +CONFIG_CF_AXI_DDS_AD9122=y +CONFIG_CF_AXI_DDS_AD9144=y +# CONFIG_CF_AXI_DDS_AD9162 is not set +# CONFIG_CF_AXI_DDS_AD9172 is not set +CONFIG_CF_AXI_DDS_AD9739A=y +# CONFIG_CF_AXI_DDS_AD9783 is not set +# CONFIG_M2K_DAC is not set +CONFIG_ADF4350=y +CONFIG_ADF5355=y +CONFIG_ADIS16080=y +CONFIG_ADIS16130=y +CONFIG_ADIS16136=y +CONFIG_ADIS16260=y +CONFIG_ADXRS450=y +CONFIG_ADIS16400=y +CONFIG_ADIS16480=y +CONFIG_IIO_ADIS_LIB=y +CONFIG_IIO_ADIS_LIB_BUFFER=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_AD9361_EXT_BAND_CONTROL=y +# CONFIG_NVMEM_AXI_SYSID is not set diff --git a/recipes-kernel/linux/linux-xlnx/hdmi-imageon.cfg b/recipes-kernel/linux/linux-xlnx/hdmi-imageon.cfg new file mode 100644 index 0000000..cb5ed45 --- /dev/null +++ b/recipes-kernel/linux/linux-xlnx/hdmi-imageon.cfg @@ -0,0 +1,51 @@ +CONFIG_EXTRA_FIRMWARE="imageon_edid.bin" +CONFIG_VIDEO_IMAGEON_BRIDGE=y +CONFIG_VIDEO_AXI_HDMI_RX=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEOBUF2_VMALLOC=m +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# 
CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_XILINX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_VGASTATE is not set
+CONFIG_HDMI=y
+CONFIG_ADI_AXI_VIDEO_FRAME_BUFFER=y
diff --git a/recipes-kernel/linux/linux-xlnx/xilinx_dma.cfg b/recipes-kernel/linux/linux-xlnx/xilinx_dma.cfg
new file mode 100644
index 0000000..cad481b
--- /dev/null
+++ b/recipes-kernel/linux/linux-xlnx/xilinx_dma.cfg
@@ -0,0 +1,5 @@
+CONFIG_XILINX_DMA=m
+# CONFIG_XILINX_ZYNQMP_DMA is not set
+# CONFIG_DRM_ADI_AXI_HDMI is not set
+# CONFIG_XILINX_DMATEST is not set
+# CONFIG_XILINX_VDMATEST is not set
diff --git a/recipes-kernel/linux/linux-xlnx/zynqMP.cfg b/recipes-kernel/linux/linux-xlnx/zynqMP.cfg
new file mode 100644
index 0000000..a27a7f2
--- /dev/null
+++ b/recipes-kernel/linux/linux-xlnx/zynqMP.cfg
@@ -0,0 +1,5 @@
+CONFIG_XILINX_ZYNQMP_DMA=m
+# CONFIG_DRM_ADI_AXI_HDMI is not set
+# CONFIG_XILINX_DMATEST is not set
+# CONFIG_XILINX_VDMATEST is not set
+# CONFIG_XILINX_DMA is not set
diff --git a/recipes-kernel/linux/linux-xlnx_%.bbappend b/recipes-kernel/linux/linux-xlnx_%.bbappend
index b1bf8e1..141a607 100644
--- a/recipes-kernel/linux/linux-xlnx_%.bbappend
+++ b/recipes-kernel/linux/linux-xlnx_%.bbappend
@@ -14,7 +14,7 @@ KERNELURI = "git://github.com/analogdevicesinc/linux.git;protocol=https"
 # override kernel config file
 KBUILD_DEFCONFIG:versal = "xilinx_versal_defconfig"
 KBUILD_DEFCONFIG:zynmp = "adi_zynqmp_defconfig"
-KBUILD_DEFCONFIG:zynq = "adi_zynq_defconfig"
+KBUILD_DEFCONFIG:zynq = "zynq_xcomm_adv7511_defconfig"
 SRC_URI:append = " file://bsp.cfg"
 KERNEL_FEATURES:append = " bsp.cfg"
@@ -24,7 +24,7 @@ FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
 SRC_URI:append = " file://bsp.cfg "
 KERNEL_FEATURES:append = " bsp.cfg "
-SRC_URI:append = " file://fpgamgr.cfg \
+SRC_URI:append:zynq = " file://fpgamgr.cfg \
 file://iio_axidmac.cfg \
 file://kerneltracing.cfg \
 file://netconsole.cfg \
@@ -32,6 +32,30 @@ SRC_URI:append = " file://fpgamgr.cfg \
 file://0001-drivers-dma-Remove-partial-transfers-for-ADI-DMAC.patch \
 file://0002-drivers-misc-mathworks-Update-get_irq-reset-tlast.patch \
 file://0003-drivers-dma-xilinx-Add-chan-directions.patch \
+ file://0004-industrialio-buffer-dma-improvements.patch \
+ file://0005-Apply-patch-to-update-adv7604.c-for-720p30-support.patch \
+ file://0006-patch-for-IPCore-Interrupt-driver.patch \
+ file://0007-patch-xilinx-dma.patch \
+ file://adrv-conf.cfg \
+ file://hdmi-imageon.cfg \
+ file://drivers.cfg \
+ file://xilinx_dma.cfg \
+ "
+SRC_URI:append:zynqmp = " file://fpgamgr.cfg \
+ file://iio_axidmac.cfg \
+ file://kerneltracing.cfg \
+ file://netconsole.cfg \
+ file://usb_gadget.cfg \
+ file://0001-drivers-dma-Remove-partial-transfers-for-ADI-DMAC.patch \
+ file://0002-drivers-misc-mathworks-Update-get_irq-reset-tlast.patch \
+ file://0003-drivers-dma-xilinx-Add-chan-directions.patch \
+ file://0004-industrialio-buffer-dma-improvements.patch \
+ file://0005-Apply-patch-to-update-adv7604.c-for-720p30-support.patch \
+ file://0006-patch-for-IPCore-Interrupt-driver.patch \
+ file://0007-patch-xilinx-dma.patch \
+ file://adrv-conf.cfg \
+ file://hdmi-imageon.cfg \
+ file://drivers.cfg \
+ file://zynqMP.cfg \
+ file://xilinx_dma.cfg \
+ "
-
-
diff --git a/recipes-support/cracklib/cracklib_2.9.10.bb b/recipes-support/cracklib/cracklib_2.9.10.bb
new file mode 100644
index 0000000..fa08230
--- /dev/null
+++ b/recipes-support/cracklib/cracklib_2.9.10.bb
@@ -0,0 +1,39 @@
+# CrackLib BitBake recipe
+
+SUMMARY = "CrackLib is a library to enforce strong passwords"
+DESCRIPTION = "CrackLib is a library that can be used to enforce strong passwords by preventing users from choosing passwords that are too simple."
+HOMEPAGE = "https://github.com/cracklib/cracklib"
+LICENSE = "LGPL-2.1-or-later"
+LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=e3eda01d9815f8d24aae2dbd89b68b06"
+
+SRC_URI = "https://github.com/cracklib/cracklib/releases/download/v${PV}/cracklib-${PV}.tar.gz"
+SRC_URI[md5sum] = "29e16b33cf4b2ed9c59b898594b9c399"
+SRC_URI[sha256sum] = "6d7726ce2ab9ee35aa46a511f12badb059b3ceb7932c7fe64806d265b898aa63"
+
+# Replace with the actual version you are using
+PV = "2.9.10"
+
+DEPENDS = "autoconf automake libtool"
+
+inherit autotools gettext
+
+# Specify any extra configuration options
+EXTRA_OECONF = "--disable-static"
+
+do_install:append() {
+    # Install additional files if necessary
+    #install -d ${D}${datadir}/cracklib
+    #install -m 0644 ${S}/cracklib-words ${D}${datadir}/cracklib/
+}
+
+#FILES_${PN} += "${datadir}/cracklib"
+
+# If there are any post-installation tasks
+# pkg_postinst_${PN}() {
+#    # Post-install script commands
+# }
+
+# If there are any post-removal tasks
+# pkg_postrm_${PN}() {
+#    # Post-removal script commands
+# }
diff --git a/recipes-support/libad9361-iio/libad9361-iio_0.2.bb b/recipes-support/libad9361-iio/libad9361-iio_0.2.bb
new file mode 100644
index 0000000..f4523e6
--- /dev/null
+++ b/recipes-support/libad9361-iio/libad9361-iio_0.2.bb
@@ -0,0 +1,17 @@
+SUMMARY = "Library to manage multi-chip sync on FMCOMMS5 platforms with multiple AD9361 devices"
+SECTION = "libs"
+LICENSE = "LGPL-2.1-or-later"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=40d2542b8c43a3ec2b7f5da31a697b88"
+BRANCH = "main"
+
+# If we are in an offline build we cannot use AUTOREV since it would require internet!
+SRCREV = "${@ "39a039215c9577da19dee838faca8ce0addcb7d6" if bb.utils.to_boolean(d.getVar('BB_NO_NETWORK')) else d.getVar('AUTOREV')}"
+PV:append = "+git${SRCPV}"
+
+SRC_URI = "git://github.com/analogdevicesinc/libad9361-iio.git;protocol=https;branch=${BRANCH}"
+
+S = "${WORKDIR}/git"
+
+DEPENDS = "libiio"
+
+inherit cmake pkgconfig
diff --git a/recipes-support/libiio/libiio_%.bbappend b/recipes-support/libiio/libiio_%.bbappend
index a1e4dbd..c4356af 100644
--- a/recipes-support/libiio/libiio_%.bbappend
+++ b/recipes-support/libiio/libiio_%.bbappend
@@ -1,6 +1,6 @@
 FILESEXTRAPATHS:prepend := "${THISDIR}/files:"
 BRANCH ?= "libiio-v0"
-SRCREV = "${@ "0d8a69aaf2f064cafaa9a962308f679f9b8fa982" if bb.utils.to_boolean(d.getVar('BB_NO_NETWORK')) else d.getVar('AUTOREV')}"
+SRCREV = "b6028fdeef888ab45f7c1dd6e4ed9480ae4b55e3"
 # Just overwrite SRC_URI as we would need to delete the python bindings patch since it does not apply
 # (already fixed in 0.24) and we do not want to hardcode ';branch=master' so that we would also have to
 # remove that leaving the variable empty anyways.